src/base/debug/trace_event_impl.cc
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/debug/trace_event_impl.h"
6
7 #include <algorithm>
8
9 #include "base/base_switches.h"
10 #include "base/bind.h"
11 #include "base/command_line.h"
12 #include "base/debug/leak_annotations.h"
13 #include "base/debug/trace_event.h"
14 #include "base/debug/trace_event_synthetic_delay.h"
15 #include "base/float_util.h"
16 #include "base/format_macros.h"
17 #include "base/json/string_escape.h"
18 #include "base/lazy_instance.h"
19 #include "base/memory/singleton.h"
20 #include "base/message_loop/message_loop.h"
21 #include "base/process/process_metrics.h"
22 #include "base/stl_util.h"
23 #include "base/strings/string_number_conversions.h"
24 #include "base/strings/string_split.h"
25 #include "base/strings/string_tokenizer.h"
26 #include "base/strings/string_util.h"
27 #include "base/strings/stringprintf.h"
28 #include "base/strings/utf_string_conversions.h"
29 #include "base/synchronization/cancellation_flag.h"
30 #include "base/synchronization/waitable_event.h"
31 #include "base/sys_info.h"
32 #include "base/third_party/dynamic_annotations/dynamic_annotations.h"
33 #include "base/threading/platform_thread.h"
34 #include "base/threading/thread_id_name_manager.h"
35 #include "base/time/time.h"
36
37 #if defined(OS_WIN)
38 #include "base/debug/trace_event_win.h"
39 #endif
40
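// Allows tests to destroy the leaky TraceLog singleton by running the
// singleton's exit hook directly.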
41 class DeleteTraceLogForTesting {
42  public:
43   static void Delete() {
44     Singleton<base::debug::TraceLog,
45               LeakySingletonTraits<base::debug::TraceLog> >::OnExit(0);
46   }
47 };
48
49 // The thread buckets for the sampling profiler.
50 BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
51
52 namespace base {
53 namespace debug {
54
55 namespace {
56
57 // The overhead of TraceEvent above this threshold will be reported in the
58 // trace.
59 const int kOverheadReportThresholdInMicroseconds = 50;
60
61 // Controls the number of trace events we will buffer in-memory
62 // before throwing them away.
63 const size_t kTraceBufferChunkSize = TraceBufferChunk::kTraceBufferChunkSize;
64 const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize;
65 const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4;
66 const size_t kTraceEventBatchChunks = 1000 / kTraceBufferChunkSize;
67 // Can store results for 30 seconds with 1 ms sampling interval.
68 const size_t kMonitorTraceEventBufferChunks = 30000 / kTraceBufferChunkSize;
69 // ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
70 const size_t kEchoToConsoleTraceEventBufferChunks = 256;
71
72 const int kThreadFlushTimeoutMs = 3000;
73
74 #if !defined(OS_NACL)
75 // These categories will cause a deadlock when ECHO_TO_CONSOLE is enabled. crbug.com/325575.
76 const char kEchoToConsoleCategoryFilter[] = "-ipc,-task";
77 #endif
78
79 const char kSyntheticDelayCategoryFilterPrefix[] = "DELAY(";
80
81 #define MAX_CATEGORY_GROUPS 100
82
83 // Parallel arrays g_category_groups and g_category_group_enabled are separate
84 // so that a pointer to a member of g_category_group_enabled can be easily
85 // converted to an index into g_category_groups. This allows macros to deal
86 // only with the char enabled pointers from g_category_group_enabled, and we
87 // can internally convert an enabled pointer back to the corresponding
88 // category name.
89 const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
90   "toplevel",
91   "tracing already shutdown",
92   "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
93   "__metadata",
94   // For reporting trace_event overhead. For thread local event buffers only.
95   "trace_event_overhead"};
96
97 // The enabled flag is char instead of bool so that the API can be used from C.
98 unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = { 0 };
99 // Indexes here have to match the g_category_groups array indexes above.
100 const int g_category_already_shutdown = 1;
101 const int g_category_categories_exhausted = 2;
102 const int g_category_metadata = 3;
103 const int g_category_trace_event_overhead = 4;
104 const int g_num_builtin_categories = 5;
105 int g_category_index = g_num_builtin_categories; // Skip default categories.
106
107 // The name of the current thread. This is used to decide if the current
108 // thread name has changed. We combine all the seen thread names into the
109 // output name for the thread.
110 LazyInstance<ThreadLocalPointer<const char> >::Leaky
111     g_current_thread_name = LAZY_INSTANCE_INITIALIZER;
112
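// Returns the current thread's CPU time, or a null TimeTicks if per-thread
// timing is not supported on this platform.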
113 TimeTicks ThreadNow() {
114   return TimeTicks::IsThreadNowSupported() ?
115       TimeTicks::ThreadNow() : TimeTicks();
116 }
117
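// A fixed-capacity ring buffer of chunks. recyclable_chunks_queue_ holds chunk
// indices in recycling order: GetChunk() pops the oldest index and leaves NULL
// in that slot while the chunk is in flight, ReturnChunk() pushes the index
// back, and NextChunk() iterates chunks in the same oldest-first order.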
118 class TraceBufferRingBuffer : public TraceBuffer {
119  public:
120   TraceBufferRingBuffer(size_t max_chunks)
121       : max_chunks_(max_chunks),
122         recyclable_chunks_queue_(new size_t[queue_capacity()]),
123         queue_head_(0),
124         queue_tail_(max_chunks),
125         current_iteration_index_(0),
126         current_chunk_seq_(1) {
127     chunks_.reserve(max_chunks);
128     for (size_t i = 0; i < max_chunks; ++i)
129       recyclable_chunks_queue_[i] = i;
130   }
131
132   virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
133     // Because the number of threads is much less than the number of chunks,
134     // the queue should never be empty.
135     DCHECK(!QueueIsEmpty());
136
137     *index = recyclable_chunks_queue_[queue_head_];
138     queue_head_ = NextQueueIndex(queue_head_);
139     current_iteration_index_ = queue_head_;
140
141     if (*index >= chunks_.size())
142       chunks_.resize(*index + 1);
143
144     TraceBufferChunk* chunk = chunks_[*index];
145     chunks_[*index] = NULL;  // Put NULL in the slot of an in-flight chunk.
146     if (chunk)
147       chunk->Reset(current_chunk_seq_++);
148     else
149       chunk = new TraceBufferChunk(current_chunk_seq_++);
150
151     return scoped_ptr<TraceBufferChunk>(chunk);
152   }
153
154   virtual void ReturnChunk(size_t index,
155                            scoped_ptr<TraceBufferChunk> chunk) OVERRIDE {
156     // When this method is called, the queue should not be full because it
157     // has capacity for all chunks, including the one being returned.
158     DCHECK(!QueueIsFull());
159     DCHECK(chunk);
160     DCHECK_LT(index, chunks_.size());
161     DCHECK(!chunks_[index]);
162     chunks_[index] = chunk.release();
163     recyclable_chunks_queue_[queue_tail_] = index;
164     queue_tail_ = NextQueueIndex(queue_tail_);
165   }
166
167   virtual bool IsFull() const OVERRIDE {
168     return false;
169   }
170
171   virtual size_t Size() const OVERRIDE {
172     // This is approximate because not all of the chunks are full.
173     return chunks_.size() * kTraceBufferChunkSize;
174   }
175
176   virtual size_t Capacity() const OVERRIDE {
177     return max_chunks_ * kTraceBufferChunkSize;
178   }
179
180   virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
181     if (handle.chunk_index >= chunks_.size())
182       return NULL;
183     TraceBufferChunk* chunk = chunks_[handle.chunk_index];
184     if (!chunk || chunk->seq() != handle.chunk_seq)
185       return NULL;
186     return chunk->GetEventAt(handle.event_index);
187   }
188
189   virtual const TraceBufferChunk* NextChunk() OVERRIDE {
190     if (chunks_.empty())
191       return NULL;
192
193     while (current_iteration_index_ != queue_tail_) {
194       size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_];
195       current_iteration_index_ = NextQueueIndex(current_iteration_index_);
196       if (chunk_index >= chunks_.size()) // Skip uninitialized chunks.
197         continue;
198       DCHECK(chunks_[chunk_index]);
199       return chunks_[chunk_index];
200     }
201     return NULL;
202   }
203
204   virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
205     scoped_ptr<ClonedTraceBuffer> cloned_buffer(new ClonedTraceBuffer());
206     for (size_t queue_index = queue_head_; queue_index != queue_tail_;
207         queue_index = NextQueueIndex(queue_index)) {
208       size_t chunk_index = recyclable_chunks_queue_[queue_index];
209       if (chunk_index >= chunks_.size()) // Skip uninitialized chunks.
210         continue;
211       TraceBufferChunk* chunk = chunks_[chunk_index];
212       cloned_buffer->chunks_.push_back(chunk ? chunk->Clone().release() : NULL);
213     }
214     return cloned_buffer.PassAs<TraceBuffer>();
215   }
216
217  private:
218   class ClonedTraceBuffer : public TraceBuffer {
219    public:
220     ClonedTraceBuffer() : current_iteration_index_(0) {}
221
222     // The only implemented method.
223     virtual const TraceBufferChunk* NextChunk() OVERRIDE {
224       return current_iteration_index_ < chunks_.size() ?
225           chunks_[current_iteration_index_++] : NULL;
226     }
227
228     virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
229       NOTIMPLEMENTED();
230       return scoped_ptr<TraceBufferChunk>();
231     }
232     virtual void ReturnChunk(size_t index,
233                              scoped_ptr<TraceBufferChunk>) OVERRIDE {
234       NOTIMPLEMENTED();
235     }
236     virtual bool IsFull() const OVERRIDE { return false; }
237     virtual size_t Size() const OVERRIDE { return 0; }
238     virtual size_t Capacity() const OVERRIDE { return 0; }
239     virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
240       return NULL;
241     }
242     virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
243       NOTIMPLEMENTED();
244       return scoped_ptr<TraceBuffer>();
245     }
246
247     size_t current_iteration_index_;
248     ScopedVector<TraceBufferChunk> chunks_;
249   };
250
251   bool QueueIsEmpty() const {
252     return queue_head_ == queue_tail_;
253   }
254
255   size_t QueueSize() const {
256     return queue_tail_ > queue_head_ ? queue_tail_ - queue_head_ :
257         queue_tail_ + queue_capacity() - queue_head_;
258   }
259
260   bool QueueIsFull() const {
261     return QueueSize() == queue_capacity() - 1;
262   }
263
264   size_t queue_capacity() const {
265     // One extra slot to help distinguish the full state from the empty state.
266     return max_chunks_ + 1;
267   }
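  // Because the queue has max_chunks_ + 1 slots, it can hold every chunk index
  // at once: head == tail only when the queue is empty, and
  // QueueSize() == queue_capacity() - 1 only when it is full, so the two
  // states are never ambiguous.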
268
269   size_t NextQueueIndex(size_t index) const {
270     index++;
271     if (index >= queue_capacity())
272       index = 0;
273     return index;
274   }
275
276   size_t max_chunks_;
277   ScopedVector<TraceBufferChunk> chunks_;
278
279   scoped_ptr<size_t[]> recyclable_chunks_queue_;
280   size_t queue_head_;
281   size_t queue_tail_;
282
283   size_t current_iteration_index_;
284   uint32 current_chunk_seq_;
285
286   DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer);
287 };
288
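// A grow-only buffer of chunks used when recording until the buffer is full;
// IsFull() becomes true once kTraceEventVectorBufferChunks chunks have been
// created.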
289 class TraceBufferVector : public TraceBuffer {
290  public:
291   TraceBufferVector()
292       : in_flight_chunk_count_(0),
293         current_iteration_index_(0) {
294     chunks_.reserve(kTraceEventVectorBufferChunks);
295   }
296
297   virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
298     // This function may be called when adding normal events or indirectly from
299     // AddMetadataEventsWhileLocked(). We cannot DCHECK(!IsFull()) because we
300     // have to add the metadata events and flush thread-local buffers even if
301     // the buffer is full.
302     *index = chunks_.size();
303     chunks_.push_back(NULL);  // Put NULL in the slot of an in-flight chunk.
304     ++in_flight_chunk_count_;
305     // + 1 because zero chunk_seq is not allowed.
306     return scoped_ptr<TraceBufferChunk>(
307         new TraceBufferChunk(static_cast<uint32>(*index) + 1));
308   }
309
310   virtual void ReturnChunk(size_t index,
311                            scoped_ptr<TraceBufferChunk> chunk) OVERRIDE {
312     DCHECK_GT(in_flight_chunk_count_, 0u);
313     DCHECK_LT(index, chunks_.size());
314     DCHECK(!chunks_[index]);
315     --in_flight_chunk_count_;
316     chunks_[index] = chunk.release();
317   }
318
319   virtual bool IsFull() const OVERRIDE {
320     return chunks_.size() >= kTraceEventVectorBufferChunks;
321   }
322
323   virtual size_t Size() const OVERRIDE {
324     // This is approximate because not all of the chunks are full.
325     return chunks_.size() * kTraceBufferChunkSize;
326   }
327
328   virtual size_t Capacity() const OVERRIDE {
329     return kTraceEventVectorBufferChunks * kTraceBufferChunkSize;
330   }
331
332   virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
333     if (handle.chunk_index >= chunks_.size())
334       return NULL;
335     TraceBufferChunk* chunk = chunks_[handle.chunk_index];
336     if (!chunk || chunk->seq() != handle.chunk_seq)
337       return NULL;
338     return chunk->GetEventAt(handle.event_index);
339   }
340
341   virtual const TraceBufferChunk* NextChunk() OVERRIDE {
342     while (current_iteration_index_ < chunks_.size()) {
343       // Skip in-flight chunks.
344       const TraceBufferChunk* chunk = chunks_[current_iteration_index_++];
345       if (chunk)
346         return chunk;
347     }
348     return NULL;
349   }
350
351   virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
352     NOTIMPLEMENTED();
353     return scoped_ptr<TraceBuffer>();
354   }
355
356  private:
357   size_t in_flight_chunk_count_;
358   size_t current_iteration_index_;
359   ScopedVector<TraceBufferChunk> chunks_;
360
361   DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
362 };
363
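// Fills |trace_event| (if non-NULL) with a metadata event carrying a single
// argument |arg_name| set to |value|.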
364 template <typename T>
365 void InitializeMetadataEvent(TraceEvent* trace_event,
366                              int thread_id,
367                              const char* metadata_name, const char* arg_name,
368                              const T& value) {
369   if (!trace_event)
370     return;
371
372   int num_args = 1;
373   unsigned char arg_type;
374   unsigned long long arg_value;
375   ::trace_event_internal::SetTraceValue(value, &arg_type, &arg_value);
376   trace_event->Initialize(thread_id,
377                           TimeTicks(), TimeTicks(), TRACE_EVENT_PHASE_METADATA,
378                           &g_category_group_enabled[g_category_metadata],
379                           metadata_name, ::trace_event_internal::kNoEventId,
380                           num_args, &arg_name, &arg_type, &arg_value, NULL,
381                           TRACE_EVENT_FLAG_NONE);
382 }
383
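// Scoper that sets a ThreadLocalBoolean for the duration of a scope and
// DCHECKs that it was not already set, guarding against re-entrancy.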
384 class AutoThreadLocalBoolean {
385  public:
386   explicit AutoThreadLocalBoolean(ThreadLocalBoolean* thread_local_boolean)
387       : thread_local_boolean_(thread_local_boolean) {
388     DCHECK(!thread_local_boolean_->Get());
389     thread_local_boolean_->Set(true);
390   }
391   ~AutoThreadLocalBoolean() {
392     thread_local_boolean_->Set(false);
393   }
394
395  private:
396   ThreadLocalBoolean* thread_local_boolean_;
397   DISALLOW_COPY_AND_ASSIGN(AutoThreadLocalBoolean);
398 };
399
400 }  // namespace
401
402 void TraceBufferChunk::Reset(uint32 new_seq) {
403   for (size_t i = 0; i < next_free_; ++i)
404     chunk_[i].Reset();
405   next_free_ = 0;
406   seq_ = new_seq;
407 }
408
409 TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) {
410   DCHECK(!IsFull());
411   *event_index = next_free_++;
412   return &chunk_[*event_index];
413 }
414
415 scoped_ptr<TraceBufferChunk> TraceBufferChunk::Clone() const {
416   scoped_ptr<TraceBufferChunk> cloned_chunk(new TraceBufferChunk(seq_));
417   cloned_chunk->next_free_ = next_free_;
418   for (size_t i = 0; i < next_free_; ++i)
419     cloned_chunk->chunk_[i].CopyFrom(chunk_[i]);
420   return cloned_chunk.Pass();
421 }
422
423 // A helper class that allows the lock to be acquired in the middle of the scope
424 // and unlocks it at the end of the scope if it was acquired.
425 class TraceLog::OptionalAutoLock {
426  public:
427   explicit OptionalAutoLock(Lock& lock)
428       : lock_(lock),
429         locked_(false) {
430   }
431
432   ~OptionalAutoLock() {
433     if (locked_)
434       lock_.Release();
435   }
436
437   void EnsureAcquired() {
438     if (!locked_) {
439       lock_.Acquire();
440       locked_ = true;
441     }
442   }
443
444  private:
445   Lock& lock_;
446   bool locked_;
447   DISALLOW_COPY_AND_ASSIGN(OptionalAutoLock);
448 };
449
450 // Use this function instead of the TraceEventHandle constructor to keep the
451 // overhead of the ScopedTracer (trace_event.h) constructor to a minimum.
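// A handle encodes (chunk_seq, chunk_index, event_index); the two indices are
// truncated to 16 bits, and the DCHECKs below guard against overflow.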
452 void MakeHandle(uint32 chunk_seq, size_t chunk_index, size_t event_index,
453                 TraceEventHandle* handle) {
454   DCHECK(chunk_seq);
455   DCHECK(chunk_index < (1u << 16));
456   DCHECK(event_index < (1u << 16));
457   handle->chunk_seq = chunk_seq;
458   handle->chunk_index = static_cast<uint16>(chunk_index);
459   handle->event_index = static_cast<uint16>(event_index);
460 }
461
462 ////////////////////////////////////////////////////////////////////////////////
463 //
464 // TraceEvent
465 //
466 ////////////////////////////////////////////////////////////////////////////////
467
468 namespace {
469
470 size_t GetAllocLength(const char* str) { return str ? strlen(str) + 1 : 0; }
471
472 // Copies |*member| into |*buffer|, sets |*member| to point to this new
473 // location, and then advances |*buffer| by the amount written.
474 void CopyTraceEventParameter(char** buffer,
475                              const char** member,
476                              const char* end) {
477   if (*member) {
478     size_t written = strlcpy(*buffer, *member, end - *buffer) + 1;
479     DCHECK_LE(static_cast<int>(written), end - *buffer);
480     *member = *buffer;
481     *buffer += written;
482   }
483 }
484
485 }  // namespace
486
487 TraceEvent::TraceEvent()
488     : duration_(TimeDelta::FromInternalValue(-1)),
489       id_(0u),
490       category_group_enabled_(NULL),
491       name_(NULL),
492       thread_id_(0),
493       phase_(TRACE_EVENT_PHASE_BEGIN),
494       flags_(0) {
495   for (int i = 0; i < kTraceMaxNumArgs; ++i)
496     arg_names_[i] = NULL;
497   memset(arg_values_, 0, sizeof(arg_values_));
498 }
499
500 TraceEvent::~TraceEvent() {
501 }
502
503 void TraceEvent::CopyFrom(const TraceEvent& other) {
504   timestamp_ = other.timestamp_;
505   thread_timestamp_ = other.thread_timestamp_;
506   duration_ = other.duration_;
507   id_ = other.id_;
508   category_group_enabled_ = other.category_group_enabled_;
509   name_ = other.name_;
510   thread_id_ = other.thread_id_;
511   phase_ = other.phase_;
512   flags_ = other.flags_;
513   parameter_copy_storage_ = other.parameter_copy_storage_;
514
515   for (int i = 0; i < kTraceMaxNumArgs; ++i) {
516     arg_names_[i] = other.arg_names_[i];
517     arg_types_[i] = other.arg_types_[i];
518     arg_values_[i] = other.arg_values_[i];
519     convertable_values_[i] = other.convertable_values_[i];
520   }
521 }
522
523 void TraceEvent::Initialize(
524     int thread_id,
525     TimeTicks timestamp,
526     TimeTicks thread_timestamp,
527     char phase,
528     const unsigned char* category_group_enabled,
529     const char* name,
530     unsigned long long id,
531     int num_args,
532     const char** arg_names,
533     const unsigned char* arg_types,
534     const unsigned long long* arg_values,
535     const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
536     unsigned char flags) {
537   timestamp_ = timestamp;
538   thread_timestamp_ = thread_timestamp;
539   duration_ = TimeDelta::FromInternalValue(-1);
540   id_ = id;
541   category_group_enabled_ = category_group_enabled;
542   name_ = name;
543   thread_id_ = thread_id;
544   phase_ = phase;
545   flags_ = flags;
546
547   // Clamp num_args since it may have been set by a third_party library.
548   num_args = (num_args > kTraceMaxNumArgs) ? kTraceMaxNumArgs : num_args;
549   int i = 0;
550   for (; i < num_args; ++i) {
551     arg_names_[i] = arg_names[i];
552     arg_types_[i] = arg_types[i];
553
554     if (arg_types[i] == TRACE_VALUE_TYPE_CONVERTABLE)
555       convertable_values_[i] = convertable_values[i];
556     else
557       arg_values_[i].as_uint = arg_values[i];
558   }
559   for (; i < kTraceMaxNumArgs; ++i) {
560     arg_names_[i] = NULL;
561     arg_values_[i].as_uint = 0u;
562     convertable_values_[i] = NULL;
563     arg_types_[i] = TRACE_VALUE_TYPE_UINT;
564   }
565
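  // When TRACE_EVENT_FLAG_COPY is set, first measure how much storage the
  // copied strings need; a single RefCountedString is then allocated below and
  // the name, argument names and COPY_STRING values are copied into it.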
566   bool copy = !!(flags & TRACE_EVENT_FLAG_COPY);
567   size_t alloc_size = 0;
568   if (copy) {
569     alloc_size += GetAllocLength(name);
570     for (i = 0; i < num_args; ++i) {
571       alloc_size += GetAllocLength(arg_names_[i]);
572       if (arg_types_[i] == TRACE_VALUE_TYPE_STRING)
573         arg_types_[i] = TRACE_VALUE_TYPE_COPY_STRING;
574     }
575   }
576
577   bool arg_is_copy[kTraceMaxNumArgs];
578   for (i = 0; i < num_args; ++i) {
579     // No copying of convertable types, we retain ownership.
580     if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
581       continue;
582
583     // We only take a copy of arg_vals if they are of type COPY_STRING.
584     arg_is_copy[i] = (arg_types_[i] == TRACE_VALUE_TYPE_COPY_STRING);
585     if (arg_is_copy[i])
586       alloc_size += GetAllocLength(arg_values_[i].as_string);
587   }
588
589   if (alloc_size) {
590     parameter_copy_storage_ = new RefCountedString;
591     parameter_copy_storage_->data().resize(alloc_size);
592     char* ptr = string_as_array(&parameter_copy_storage_->data());
593     const char* end = ptr + alloc_size;
594     if (copy) {
595       CopyTraceEventParameter(&ptr, &name_, end);
596       for (i = 0; i < num_args; ++i) {
597         CopyTraceEventParameter(&ptr, &arg_names_[i], end);
598       }
599     }
600     for (i = 0; i < num_args; ++i) {
601       if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
602         continue;
603       if (arg_is_copy[i])
604         CopyTraceEventParameter(&ptr, &arg_values_[i].as_string, end);
605     }
606     DCHECK_EQ(end, ptr) << "Overrun by " << ptr - end;
607   }
608 }
609
610 void TraceEvent::Reset() {
611   // Only reset fields that won't be initialized in Initialize(), or that may
612   // hold references to other objects.
613   duration_ = TimeDelta::FromInternalValue(-1);
614   parameter_copy_storage_ = NULL;
615   for (int i = 0; i < kTraceMaxNumArgs; ++i)
616     convertable_values_[i] = NULL;
617 }
618
619 void TraceEvent::UpdateDuration(const TimeTicks& now,
620                                 const TimeTicks& thread_now) {
621   DCHECK(duration_.ToInternalValue() == -1);
622   duration_ = now - timestamp_;
623   thread_duration_ = thread_now - thread_timestamp_;
624 }
625
626 // static
627 void TraceEvent::AppendValueAsJSON(unsigned char type,
628                                    TraceEvent::TraceValue value,
629                                    std::string* out) {
630   switch (type) {
631     case TRACE_VALUE_TYPE_BOOL:
632       *out += value.as_bool ? "true" : "false";
633       break;
634     case TRACE_VALUE_TYPE_UINT:
635       StringAppendF(out, "%" PRIu64, static_cast<uint64>(value.as_uint));
636       break;
637     case TRACE_VALUE_TYPE_INT:
638       StringAppendF(out, "%" PRId64, static_cast<int64>(value.as_int));
639       break;
640     case TRACE_VALUE_TYPE_DOUBLE: {
641       // FIXME: base/json/json_writer.cc uses the same code; this should be
642       //        factored into a common method.
643       std::string real;
644       double val = value.as_double;
645       if (IsFinite(val)) {
646         real = DoubleToString(val);
647         // Ensure that the number has a .0 if there's no decimal or 'e'.  This
648         // makes sure that when we read the JSON back, it's interpreted as a
649         // real rather than an int.
650         if (real.find('.') == std::string::npos &&
651             real.find('e') == std::string::npos &&
652             real.find('E') == std::string::npos) {
653           real.append(".0");
654         }
655         // The JSON spec requires that non-integer values in the range (-1,1)
656         // have a zero before the decimal point - ".52" is not valid, "0.52" is.
657         if (real[0] == '.') {
658           real.insert(0, "0");
659         } else if (real.length() > 1 && real[0] == '-' && real[1] == '.') {
660           // "-.1" bad "-0.1" good
661           real.insert(1, "0");
662         }
663       } else if (IsNaN(val)) {
664         // The JSON spec doesn't allow NaN and Infinity (since these are
665         // objects in EcmaScript).  Use strings instead.
666         real = "\"NaN\"";
667       } else if (val < 0) {
668         real = "\"-Infinity\"";
669       } else {
670         real = "\"Infinity\"";
671       }
672       StringAppendF(out, "%s", real.c_str());
673       break;
674     }
675     case TRACE_VALUE_TYPE_POINTER:
676       // JSON only supports double and int numbers.
677       // So as not to lose bits from a 64-bit pointer, output as a hex string.
678       StringAppendF(out, "\"0x%" PRIx64 "\"", static_cast<uint64>(
679                                      reinterpret_cast<intptr_t>(
680                                      value.as_pointer)));
681       break;
682     case TRACE_VALUE_TYPE_STRING:
683     case TRACE_VALUE_TYPE_COPY_STRING:
684       EscapeJSONString(value.as_string ? value.as_string : "NULL", true, out);
685       break;
686     default:
687       NOTREACHED() << "Don't know how to print this value";
688       break;
689   }
690 }
691
692 void TraceEvent::AppendAsJSON(std::string* out) const {
693   int64 time_int64 = timestamp_.ToInternalValue();
694   int process_id = TraceLog::GetInstance()->process_id();
695   // The category group was already checked at category creation time.
696   DCHECK(!strchr(name_, '"'));
697   StringAppendF(out,
698       "{\"cat\":\"%s\",\"pid\":%i,\"tid\":%i,\"ts\":%" PRId64 ","
699       "\"ph\":\"%c\",\"name\":\"%s\",\"args\":{",
700       TraceLog::GetCategoryGroupName(category_group_enabled_),
701       process_id,
702       thread_id_,
703       time_int64,
704       phase_,
705       name_);
706
707   // Output argument names and values, stop at first NULL argument name.
708   for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
709     if (i > 0)
710       *out += ",";
711     *out += "\"";
712     *out += arg_names_[i];
713     *out += "\":";
714
715     if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
716       convertable_values_[i]->AppendAsTraceFormat(out);
717     else
718       AppendValueAsJSON(arg_types_[i], arg_values_[i], out);
719   }
720   *out += "}";
721
722   if (phase_ == TRACE_EVENT_PHASE_COMPLETE) {
723     int64 duration = duration_.ToInternalValue();
724     if (duration != -1)
725       StringAppendF(out, ",\"dur\":%" PRId64, duration);
726     if (!thread_timestamp_.is_null()) {
727       int64 thread_duration = thread_duration_.ToInternalValue();
728       if (thread_duration != -1)
729         StringAppendF(out, ",\"tdur\":%" PRId64, thread_duration);
730     }
731   }
732
733   // Output tts if thread_timestamp is valid.
734   if (!thread_timestamp_.is_null()) {
735     int64 thread_time_int64 = thread_timestamp_.ToInternalValue();
736     StringAppendF(out, ",\"tts\":%" PRId64, thread_time_int64);
737   }
738
739   // If id_ is set, print it out as a hex string so we don't lose any
740   // bits (it might be a 64-bit pointer).
741   if (flags_ & TRACE_EVENT_FLAG_HAS_ID)
742     StringAppendF(out, ",\"id\":\"0x%" PRIx64 "\"", static_cast<uint64>(id_));
743
744   // Instant events also output their scope.
745   if (phase_ == TRACE_EVENT_PHASE_INSTANT) {
746     char scope = '?';
747     switch (flags_ & TRACE_EVENT_FLAG_SCOPE_MASK) {
748       case TRACE_EVENT_SCOPE_GLOBAL:
749         scope = TRACE_EVENT_SCOPE_NAME_GLOBAL;
750         break;
751
752       case TRACE_EVENT_SCOPE_PROCESS:
753         scope = TRACE_EVENT_SCOPE_NAME_PROCESS;
754         break;
755
756       case TRACE_EVENT_SCOPE_THREAD:
757         scope = TRACE_EVENT_SCOPE_NAME_THREAD;
758         break;
759     }
760     StringAppendF(out, ",\"s\":\"%c\"", scope);
761   }
762
763   *out += "}";
764 }
765
766 void TraceEvent::AppendPrettyPrinted(std::ostringstream* out) const {
767   *out << name_ << "[";
768   *out << TraceLog::GetCategoryGroupName(category_group_enabled_);
769   *out << "]";
770   if (arg_names_[0]) {
771     *out << ", {";
772     for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
773       if (i > 0)
774         *out << ", ";
775       *out << arg_names_[i] << ":";
776       std::string value_as_text;
777
778       if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
779         convertable_values_[i]->AppendAsTraceFormat(&value_as_text);
780       else
781         AppendValueAsJSON(arg_types_[i], arg_values_[i], &value_as_text);
782
783       *out << value_as_text;
784     }
785     *out << "}";
786   }
787 }
788
789 ////////////////////////////////////////////////////////////////////////////////
790 //
791 // TraceResultBuffer
792 //
793 ////////////////////////////////////////////////////////////////////////////////
794
795 TraceResultBuffer::OutputCallback
796     TraceResultBuffer::SimpleOutput::GetCallback() {
797   return Bind(&SimpleOutput::Append, Unretained(this));
798 }
799
800 void TraceResultBuffer::SimpleOutput::Append(
801     const std::string& json_trace_output) {
802   json_output += json_trace_output;
803 }
804
805 TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {
806 }
807
808 TraceResultBuffer::~TraceResultBuffer() {
809 }
810
811 void TraceResultBuffer::SetOutputCallback(
812     const OutputCallback& json_chunk_callback) {
813   output_callback_ = json_chunk_callback;
814 }
815
816 void TraceResultBuffer::Start() {
817   append_comma_ = false;
818   output_callback_.Run("[");
819 }
820
821 void TraceResultBuffer::AddFragment(const std::string& trace_fragment) {
822   if (append_comma_)
823     output_callback_.Run(",");
824   append_comma_ = true;
825   output_callback_.Run(trace_fragment);
826 }
827
828 void TraceResultBuffer::Finish() {
829   output_callback_.Run("]");
830 }
831
832 ////////////////////////////////////////////////////////////////////////////////
833 //
834 // TraceSamplingThread
835 //
836 ////////////////////////////////////////////////////////////////////////////////
837 class TraceBucketData;
838 typedef base::Callback<void(TraceBucketData*)> TraceSampleCallback;
839
840 class TraceBucketData {
841  public:
842   TraceBucketData(base::subtle::AtomicWord* bucket,
843                   const char* name,
844                   TraceSampleCallback callback);
845   ~TraceBucketData();
846
847   TRACE_EVENT_API_ATOMIC_WORD* bucket;
848   const char* bucket_name;
849   TraceSampleCallback callback;
850 };
851
852 // This object must be created on the IO thread.
853 class TraceSamplingThread : public PlatformThread::Delegate {
854  public:
855   TraceSamplingThread();
856   virtual ~TraceSamplingThread();
857
858   // Implementation of PlatformThread::Delegate:
859   virtual void ThreadMain() OVERRIDE;
860
861   static void DefaultSamplingCallback(TraceBucketData* bucket_data);
862
863   void Stop();
864   void WaitSamplingEventForTesting();
865
866  private:
867   friend class TraceLog;
868
869   void GetSamples();
870   // Not thread-safe. Once ThreadMain() has been called, this can no longer
871   // be called.
872   void RegisterSampleBucket(TRACE_EVENT_API_ATOMIC_WORD* bucket,
873                             const char* const name,
874                             TraceSampleCallback callback);
875   // Splits a combined "category\0name" into the two component parts.
876   static void ExtractCategoryAndName(const char* combined,
877                                      const char** category,
878                                      const char** name);
879   std::vector<TraceBucketData> sample_buckets_;
880   bool thread_running_;
881   CancellationFlag cancellation_flag_;
882   WaitableEvent waitable_event_for_testing_;
883 };
884
885
886 TraceSamplingThread::TraceSamplingThread()
887     : thread_running_(false),
888       waitable_event_for_testing_(false, false) {
889 }
890
891 TraceSamplingThread::~TraceSamplingThread() {
892 }
893
894 void TraceSamplingThread::ThreadMain() {
895   PlatformThread::SetName("Sampling Thread");
896   thread_running_ = true;
897   const int kSamplingFrequencyMicroseconds = 1000;
898   while (!cancellation_flag_.IsSet()) {
899     PlatformThread::Sleep(
900         TimeDelta::FromMicroseconds(kSamplingFrequencyMicroseconds));
901     GetSamples();
902     waitable_event_for_testing_.Signal();
903   }
904 }
905
906 // static
907 void TraceSamplingThread::DefaultSamplingCallback(
908     TraceBucketData* bucket_data) {
909   TRACE_EVENT_API_ATOMIC_WORD category_and_name =
910       TRACE_EVENT_API_ATOMIC_LOAD(*bucket_data->bucket);
911   if (!category_and_name)
912     return;
913   const char* const combined =
914       reinterpret_cast<const char* const>(category_and_name);
915   const char* category_group;
916   const char* name;
917   ExtractCategoryAndName(combined, &category_group, &name);
918   TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_SAMPLE,
919       TraceLog::GetCategoryGroupEnabled(category_group),
920       name, 0, 0, NULL, NULL, NULL, NULL, 0);
921 }
922
923 void TraceSamplingThread::GetSamples() {
924   for (size_t i = 0; i < sample_buckets_.size(); ++i) {
925     TraceBucketData* bucket_data = &sample_buckets_[i];
926     bucket_data->callback.Run(bucket_data);
927   }
928 }
929
930 void TraceSamplingThread::RegisterSampleBucket(
931     TRACE_EVENT_API_ATOMIC_WORD* bucket,
932     const char* const name,
933     TraceSampleCallback callback) {
934   // Access to sample_buckets_ does not race with the sampling thread because
935   // RegisterSampleBucket() is guaranteed to be called before the sampling
936   // thread is created.
937   DCHECK(!thread_running_);
938   sample_buckets_.push_back(TraceBucketData(bucket, name, callback));
939 }
940
941 // static
942 void TraceSamplingThread::ExtractCategoryAndName(const char* combined,
943                                                  const char** category,
944                                                  const char** name) {
945   *category = combined;
946   *name = &combined[strlen(combined) + 1];
947 }
948
949 void TraceSamplingThread::Stop() {
950   cancellation_flag_.Set();
951 }
952
953 void TraceSamplingThread::WaitSamplingEventForTesting() {
954   waitable_event_for_testing_.Wait();
955 }
956
957 TraceBucketData::TraceBucketData(base::subtle::AtomicWord* bucket,
958                                  const char* name,
959                                  TraceSampleCallback callback)
960     : bucket(bucket),
961       bucket_name(name),
962       callback(callback) {
963 }
964
965 TraceBucketData::~TraceBucketData() {
966 }
967
968 ////////////////////////////////////////////////////////////////////////////////
969 //
970 // TraceLog
971 //
972 ////////////////////////////////////////////////////////////////////////////////
973
974 class TraceLog::ThreadLocalEventBuffer
975     : public MessageLoop::DestructionObserver {
976  public:
977   ThreadLocalEventBuffer(TraceLog* trace_log);
978   virtual ~ThreadLocalEventBuffer();
979
980   TraceEvent* AddTraceEvent(TraceEventHandle* handle);
981
982   void ReportOverhead(const TimeTicks& event_timestamp,
983                       const TimeTicks& event_thread_timestamp);
984
985   TraceEvent* GetEventByHandle(TraceEventHandle handle) {
986     if (!chunk_ || handle.chunk_seq != chunk_->seq() ||
987         handle.chunk_index != chunk_index_)
988       return NULL;
989
990     return chunk_->GetEventAt(handle.event_index);
991   }
992
993   int generation() const { return generation_; }
994
995  private:
996   // MessageLoop::DestructionObserver
997   virtual void WillDestroyCurrentMessageLoop() OVERRIDE;
998
999   void FlushWhileLocked();
1000
1001   void CheckThisIsCurrentBuffer() const {
1002     DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
1003   }
1004
1005   // Since TraceLog is a leaky singleton, trace_log_ will always be valid
1006   // as long as the thread exists.
1007   TraceLog* trace_log_;
1008   scoped_ptr<TraceBufferChunk> chunk_;
1009   size_t chunk_index_;
1010   int event_count_;
1011   TimeDelta overhead_;
1012   int generation_;
1013
1014   DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer);
1015 };
1016
1017 TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
1018     : trace_log_(trace_log),
1019       chunk_index_(0),
1020       event_count_(0),
1021       generation_(trace_log->generation()) {
1022   // ThreadLocalEventBuffer is created only if the thread has a message loop, so
1023   // the following message_loop won't be NULL.
1024   MessageLoop* message_loop = MessageLoop::current();
1025   message_loop->AddDestructionObserver(this);
1026
1027   AutoLock lock(trace_log->lock_);
1028   trace_log->thread_message_loops_.insert(message_loop);
1029 }
1030
1031 TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
1032   CheckThisIsCurrentBuffer();
1033   MessageLoop::current()->RemoveDestructionObserver(this);
1034
1035   // Zero event_count_ happens in any of the following cases:
1036   // - no event generated for the thread;
1037   // - the thread has no message loop;
1038   // - trace_event_overhead is disabled.
1039   if (event_count_) {
1040     InitializeMetadataEvent(AddTraceEvent(NULL),
1041                             static_cast<int>(base::PlatformThread::CurrentId()),
1042                             "overhead", "average_overhead",
1043                             overhead_.InMillisecondsF() / event_count_);
1044   }
1045
1046   {
1047     AutoLock lock(trace_log_->lock_);
1048     FlushWhileLocked();
1049     trace_log_->thread_message_loops_.erase(MessageLoop::current());
1050   }
1051   trace_log_->thread_local_event_buffer_.Set(NULL);
1052 }
1053
1054 TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
1055     TraceEventHandle* handle) {
1056   CheckThisIsCurrentBuffer();
1057
1058   if (chunk_ && chunk_->IsFull()) {
1059     AutoLock lock(trace_log_->lock_);
1060     FlushWhileLocked();
1061     chunk_.reset();
1062   }
1063   if (!chunk_) {
1064     AutoLock lock(trace_log_->lock_);
1065     chunk_ = trace_log_->logged_events_->GetChunk(&chunk_index_);
1066     trace_log_->CheckIfBufferIsFullWhileLocked();
1067   }
1068   if (!chunk_)
1069     return NULL;
1070
1071   size_t event_index;
1072   TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index);
1073   if (trace_event && handle)
1074     MakeHandle(chunk_->seq(), chunk_index_, event_index, handle);
1075
1076   return trace_event;
1077 }
1078
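// Accumulates the tracing overhead measured on this thread. If a single
// event's overhead exceeds kOverheadReportThresholdInMicroseconds, it is also
// recorded as its own "overhead" COMPLETE event.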
1079 void TraceLog::ThreadLocalEventBuffer::ReportOverhead(
1080     const TimeTicks& event_timestamp,
1081     const TimeTicks& event_thread_timestamp) {
1082   if (!g_category_group_enabled[g_category_trace_event_overhead])
1083     return;
1084
1085   CheckThisIsCurrentBuffer();
1086
1087   event_count_++;
1088   TimeTicks thread_now = ThreadNow();
1089   TimeTicks now = trace_log_->OffsetNow();
1090   TimeDelta overhead = now - event_timestamp;
1091   if (overhead.InMicroseconds() >= kOverheadReportThresholdInMicroseconds) {
1092     TraceEvent* trace_event = AddTraceEvent(NULL);
1093     if (trace_event) {
1094       trace_event->Initialize(
1095           static_cast<int>(PlatformThread::CurrentId()),
1096           event_timestamp, event_thread_timestamp,
1097           TRACE_EVENT_PHASE_COMPLETE,
1098           &g_category_group_enabled[g_category_trace_event_overhead],
1099           "overhead", 0, 0, NULL, NULL, NULL, NULL, 0);
1100       trace_event->UpdateDuration(now, thread_now);
1101     }
1102   }
1103   overhead_ += overhead;
1104 }
1105
1106 void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
1107   delete this;
1108 }
1109
1110 void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() {
1111   if (!chunk_)
1112     return;
1113
1114   trace_log_->lock_.AssertAcquired();
1115   if (trace_log_->CheckGeneration(generation_)) {
1116     // Return the chunk to the buffer only if the generation matches.
1117     trace_log_->logged_events_->ReturnChunk(chunk_index_, chunk_.Pass());
1118   }
1119   // Otherwise this method may be called from the destructor, or TraceLog will
1120   // find the generation mismatch and delete this buffer soon.
1121 }
1122
1123 // static
1124 TraceLog* TraceLog::GetInstance() {
1125   return Singleton<TraceLog, LeakySingletonTraits<TraceLog> >::get();
1126 }
1127
1128 TraceLog::TraceLog()
1129     : mode_(DISABLED),
1130       num_traces_recorded_(0),
1131       event_callback_(0),
1132       dispatching_to_observer_list_(false),
1133       process_sort_index_(0),
1134       process_id_hash_(0),
1135       process_id_(0),
1136       watch_category_(0),
1137       trace_options_(RECORD_UNTIL_FULL),
1138       sampling_thread_handle_(0),
1139       category_filter_(CategoryFilter::kDefaultCategoryFilterString),
1140       event_callback_category_filter_(
1141           CategoryFilter::kDefaultCategoryFilterString),
1142       thread_shared_chunk_index_(0),
1143       generation_(0) {
1144   // Tracing is enabled or disabled on one thread while other threads are
1145   // accessing the enabled flag. We don't care whether edge-case events are
1146   // traced or not, so we allow races on the enabled flag to keep the trace
1147   // macros fast.
1148   // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
1149   // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
1150   //                            sizeof(g_category_group_enabled),
1151   //                           "trace_event category enabled");
1152   for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) {
1153     ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i],
1154                          "trace_event category enabled");
1155   }
1156 #if defined(OS_NACL)  // NaCl shouldn't expose the process id.
1157   SetProcessID(0);
1158 #else
1159   SetProcessID(static_cast<int>(GetCurrentProcId()));
1160
1161   // NaCl also shouldn't access the command line.
1162   if (CommandLine::InitializedForCurrentProcess() &&
1163       CommandLine::ForCurrentProcess()->HasSwitch(switches::kTraceToConsole)) {
1164     std::string filter = CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
1165         switches::kTraceToConsole);
1166     if (filter.empty()) {
1167       filter = kEchoToConsoleCategoryFilter;
1168     } else {
1169       filter.append(",");
1170       filter.append(kEchoToConsoleCategoryFilter);
1171     }
1172
1173     LOG(ERROR) << "Start " << switches::kTraceToConsole
1174                << " with CategoryFilter '" << filter << "'.";
1175     SetEnabled(CategoryFilter(filter), RECORDING_MODE, ECHO_TO_CONSOLE);
1176   }
1177 #endif
1178
1179   logged_events_.reset(CreateTraceBuffer());
1180 }
1181
1182 TraceLog::~TraceLog() {
1183 }
1184
1185 const unsigned char* TraceLog::GetCategoryGroupEnabled(
1186     const char* category_group) {
1187   TraceLog* tracelog = GetInstance();
1188   if (!tracelog) {
1189     DCHECK(!g_category_group_enabled[g_category_already_shutdown]);
1190     return &g_category_group_enabled[g_category_already_shutdown];
1191   }
1192   return tracelog->GetCategoryGroupEnabledInternal(category_group);
1193 }
1194
1195 const char* TraceLog::GetCategoryGroupName(
1196     const unsigned char* category_group_enabled) {
1197   // Calculate the index of the category group by finding
1198   // category_group_enabled in the g_category_group_enabled array.
1199   uintptr_t category_begin =
1200       reinterpret_cast<uintptr_t>(g_category_group_enabled);
1201   uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled);
1202   DCHECK(category_ptr >= category_begin &&
1203          category_ptr < reinterpret_cast<uintptr_t>(
1204              g_category_group_enabled + MAX_CATEGORY_GROUPS)) <<
1205       "out of bounds category pointer";
1206   uintptr_t category_index =
1207       (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
1208   return g_category_groups[category_index];
1209 }
1210
1211 void TraceLog::UpdateCategoryGroupEnabledFlag(int category_index) {
1212   unsigned char enabled_flag = 0;
1213   const char* category_group = g_category_groups[category_index];
1214   if (mode_ == RECORDING_MODE &&
1215       category_filter_.IsCategoryGroupEnabled(category_group))
1216     enabled_flag |= ENABLED_FOR_RECORDING;
1217   else if (mode_ == MONITORING_MODE &&
1218       category_filter_.IsCategoryGroupEnabled(category_group))
1219     enabled_flag |= ENABLED_FOR_MONITORING;
1220   if (event_callback_ &&
1221       event_callback_category_filter_.IsCategoryGroupEnabled(category_group))
1222     enabled_flag |= ENABLED_FOR_EVENT_CALLBACK;
1223   g_category_group_enabled[category_index] = enabled_flag;
1224 }
1225
1226 void TraceLog::UpdateCategoryGroupEnabledFlags() {
1227   for (int i = 0; i < g_category_index; i++)
1228     UpdateCategoryGroupEnabledFlag(i);
1229 }
1230
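// Each synthetic delay value has the form "<name>;<option>;...", where an
// option is either a target duration in seconds or one of "static", "oneshot"
// or "alternating".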
1231 void TraceLog::UpdateSyntheticDelaysFromCategoryFilter() {
1232   ResetTraceEventSyntheticDelays();
1233   const CategoryFilter::StringList& delays =
1234       category_filter_.GetSyntheticDelayValues();
1235   CategoryFilter::StringList::const_iterator ci;
1236   for (ci = delays.begin(); ci != delays.end(); ++ci) {
1237     StringTokenizer tokens(*ci, ";");
1238     if (!tokens.GetNext())
1239       continue;
1240     TraceEventSyntheticDelay* delay =
1241         TraceEventSyntheticDelay::Lookup(tokens.token());
1242     while (tokens.GetNext()) {
1243       std::string token = tokens.token();
1244       char* duration_end;
1245       double target_duration = strtod(token.c_str(), &duration_end);
1246       if (duration_end != token.c_str()) {
1247         delay->SetTargetDuration(
1248             TimeDelta::FromMicroseconds(target_duration * 1e6));
1249       } else if (token == "static") {
1250         delay->SetMode(TraceEventSyntheticDelay::STATIC);
1251       } else if (token == "oneshot") {
1252         delay->SetMode(TraceEventSyntheticDelay::ONE_SHOT);
1253       } else if (token == "alternating") {
1254         delay->SetMode(TraceEventSyntheticDelay::ALTERNATING);
1255       }
1256     }
1257   }
1258 }
1259
1260 const unsigned char* TraceLog::GetCategoryGroupEnabledInternal(
1261     const char* category_group) {
1262   DCHECK(!strchr(category_group, '"')) <<
1263       "Category groups may not contain double quote";
1264   AutoLock lock(lock_);
1265
1266   unsigned char* category_group_enabled = NULL;
1267   // Search for pre-existing category group.
1268   for (int i = 0; i < g_category_index; i++) {
1269     if (strcmp(g_category_groups[i], category_group) == 0) {
1270       category_group_enabled = &g_category_group_enabled[i];
1271       break;
1272     }
1273   }
1274
1275   if (!category_group_enabled) {
1276     // Create a new category group
1277     DCHECK(g_category_index < MAX_CATEGORY_GROUPS) <<
1278         "must increase MAX_CATEGORY_GROUPS";
1279     if (g_category_index < MAX_CATEGORY_GROUPS) {
1280       int new_index = g_category_index++;
1281       // Don't hold on to the category_group pointer, so that we can create
1282       // category groups with strings not known at compile time (this is
1283       // required by SetWatchEvent).
1284       const char* new_group = strdup(category_group);
1285       ANNOTATE_LEAKING_OBJECT_PTR(new_group);
1286       g_category_groups[new_index] = new_group;
1287       DCHECK(!g_category_group_enabled[new_index]);
1288       // Note that if both included and excluded patterns in the
1289       // CategoryFilter are empty, we exclude nothing,
1290       // thereby enabling this category group.
1291       UpdateCategoryGroupEnabledFlag(new_index);
1292       category_group_enabled = &g_category_group_enabled[new_index];
1293     } else {
1294       category_group_enabled =
1295           &g_category_group_enabled[g_category_categories_exhausted];
1296     }
1297   }
1298   return category_group_enabled;
1299 }
1300
1301 void TraceLog::GetKnownCategoryGroups(
1302     std::vector<std::string>* category_groups) {
1303   AutoLock lock(lock_);
1304   category_groups->push_back(
1305       g_category_groups[g_category_trace_event_overhead]);
1306   for (int i = g_num_builtin_categories; i < g_category_index; i++)
1307     category_groups->push_back(g_category_groups[i]);
1308 }
1309
1310 void TraceLog::SetEnabled(const CategoryFilter& category_filter,
1311                           Mode mode,
1312                           Options options) {
1313   std::vector<EnabledStateObserver*> observer_list;
1314   {
1315     AutoLock lock(lock_);
1316
1317     // Can't enable tracing when Flush() is in progress.
1318     DCHECK(!flush_message_loop_proxy_.get());
1319
1320     Options old_options = trace_options();
1321
1322     if (IsEnabled()) {
1323       if (options != old_options) {
1324         DLOG(ERROR) << "Attempting to re-enable tracing with a different "
1325                     << "set of options.";
1326       }
1327
1328       if (mode != mode_) {
1329         DLOG(ERROR) << "Attempting to re-enable tracing with a different mode.";
1330       }
1331
1332       category_filter_.Merge(category_filter);
1333       UpdateCategoryGroupEnabledFlags();
1334       return;
1335     }
1336
1337     if (dispatching_to_observer_list_) {
1338       DLOG(ERROR) <<
1339           "Cannot manipulate TraceLog::Enabled state from an observer.";
1340       return;
1341     }
1342
1343     mode_ = mode;
1344
1345     if (options != old_options) {
1346       subtle::NoBarrier_Store(&trace_options_, options);
1347       UseNextTraceBuffer();
1348     }
1349
1350     num_traces_recorded_++;
1351
1352     category_filter_ = CategoryFilter(category_filter);
1353     UpdateCategoryGroupEnabledFlags();
1354     UpdateSyntheticDelaysFromCategoryFilter();
1355
1356     if (options & ENABLE_SAMPLING) {
1357       sampling_thread_.reset(new TraceSamplingThread);
1358       sampling_thread_->RegisterSampleBucket(
1359           &g_trace_state[0],
1360           "bucket0",
1361           Bind(&TraceSamplingThread::DefaultSamplingCallback));
1362       sampling_thread_->RegisterSampleBucket(
1363           &g_trace_state[1],
1364           "bucket1",
1365           Bind(&TraceSamplingThread::DefaultSamplingCallback));
1366       sampling_thread_->RegisterSampleBucket(
1367           &g_trace_state[2],
1368           "bucket2",
1369           Bind(&TraceSamplingThread::DefaultSamplingCallback));
1370       if (!PlatformThread::Create(
1371             0, sampling_thread_.get(), &sampling_thread_handle_)) {
1372         DCHECK(false) << "failed to create thread";
1373       }
1374     }
1375
1376     dispatching_to_observer_list_ = true;
1377     observer_list = enabled_state_observer_list_;
1378   }
1379   // Notify observers outside the lock in case they trigger trace events.
1380   for (size_t i = 0; i < observer_list.size(); ++i)
1381     observer_list[i]->OnTraceLogEnabled();
1382
1383   {
1384     AutoLock lock(lock_);
1385     dispatching_to_observer_list_ = false;
1386   }
1387 }
1388
1389 CategoryFilter TraceLog::GetCurrentCategoryFilter() {
1390   AutoLock lock(lock_);
1391   return category_filter_;
1392 }
1393
1394 void TraceLog::SetDisabled() {
1395   AutoLock lock(lock_);
1396   SetDisabledWhileLocked();
1397 }
1398
1399 void TraceLog::SetDisabledWhileLocked() {
1400   lock_.AssertAcquired();
1401
1402   if (!IsEnabled())
1403     return;
1404
1405   if (dispatching_to_observer_list_) {
1406     DLOG(ERROR)
1407         << "Cannot manipulate TraceLog::Enabled state from an observer.";
1408     return;
1409   }
1410
1411   mode_ = DISABLED;
1412
1413   if (sampling_thread_.get()) {
1414     // Stop the sampling thread.
1415     sampling_thread_->Stop();
1416     lock_.Release();
1417     PlatformThread::Join(sampling_thread_handle_);
1418     lock_.Acquire();
1419     sampling_thread_handle_ = PlatformThreadHandle();
1420     sampling_thread_.reset();
1421   }
1422
1423   category_filter_.Clear();
1424   subtle::NoBarrier_Store(&watch_category_, 0);
1425   watch_event_name_ = "";
1426   UpdateCategoryGroupEnabledFlags();
1427   AddMetadataEventsWhileLocked();
1428
1429   dispatching_to_observer_list_ = true;
1430   std::vector<EnabledStateObserver*> observer_list =
1431       enabled_state_observer_list_;
1432
1433   {
1434     // Dispatch to observers outside the lock in case the observer triggers a
1435     // trace event.
1436     AutoUnlock unlock(lock_);
1437     for (size_t i = 0; i < observer_list.size(); ++i)
1438       observer_list[i]->OnTraceLogDisabled();
1439   }
1440   dispatching_to_observer_list_ = false;
1441 }
1442
1443 int TraceLog::GetNumTracesRecorded() {
1444   AutoLock lock(lock_);
1445   if (!IsEnabled())
1446     return -1;
1447   return num_traces_recorded_;
1448 }
1449
1450 void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) {
1451   enabled_state_observer_list_.push_back(listener);
1452 }
1453
1454 void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver* listener) {
1455   std::vector<EnabledStateObserver*>::iterator it =
1456       std::find(enabled_state_observer_list_.begin(),
1457                 enabled_state_observer_list_.end(),
1458                 listener);
1459   if (it != enabled_state_observer_list_.end())
1460     enabled_state_observer_list_.erase(it);
1461 }
1462
1463 bool TraceLog::HasEnabledStateObserver(EnabledStateObserver* listener) const {
1464   std::vector<EnabledStateObserver*>::const_iterator it =
1465       std::find(enabled_state_observer_list_.begin(),
1466                 enabled_state_observer_list_.end(),
1467                 listener);
1468   return it != enabled_state_observer_list_.end();
1469 }
1470
1471 float TraceLog::GetBufferPercentFull() const {
1472   AutoLock lock(lock_);
1473   return static_cast<float>(static_cast<double>(logged_events_->Size()) /
1474                             logged_events_->Capacity());
1475 }
1476
1477 bool TraceLog::BufferIsFull() const {
1478   AutoLock lock(lock_);
1479   return logged_events_->IsFull();
1480 }
1481
1482 TraceBuffer* TraceLog::CreateTraceBuffer() {
1483   Options options = trace_options();
1484   if (options & RECORD_CONTINUOUSLY)
1485     return new TraceBufferRingBuffer(kTraceEventRingBufferChunks);
1486   else if ((options & ENABLE_SAMPLING) && mode_ == MONITORING_MODE)
1487     return new TraceBufferRingBuffer(kMonitorTraceEventBufferChunks);
1488   else if (options & ECHO_TO_CONSOLE)
1489     return new TraceBufferRingBuffer(kEchoToConsoleTraceEventBufferChunks);
1490   return new TraceBufferVector();
1491 }
1492
1493 TraceEvent* TraceLog::AddEventToThreadSharedChunkWhileLocked(
1494     TraceEventHandle* handle, bool check_buffer_is_full) {
1495   lock_.AssertAcquired();
1496
1497   if (thread_shared_chunk_ && thread_shared_chunk_->IsFull()) {
1498     logged_events_->ReturnChunk(thread_shared_chunk_index_,
1499                                 thread_shared_chunk_.Pass());
1500   }
1501
1502   if (!thread_shared_chunk_) {
1503     thread_shared_chunk_ = logged_events_->GetChunk(
1504         &thread_shared_chunk_index_);
1505     if (check_buffer_is_full)
1506       CheckIfBufferIsFullWhileLocked();
1507   }
1508   if (!thread_shared_chunk_)
1509     return NULL;
1510
1511   size_t event_index;
1512   TraceEvent* trace_event = thread_shared_chunk_->AddTraceEvent(&event_index);
1513   if (trace_event && handle) {
1514     MakeHandle(thread_shared_chunk_->seq(), thread_shared_chunk_index_,
1515                event_index, handle);
1516   }
1517   return trace_event;
1518 }
1519
1520 void TraceLog::CheckIfBufferIsFullWhileLocked() {
1521   lock_.AssertAcquired();
1522   if (logged_events_->IsFull())
1523     SetDisabledWhileLocked();
1524 }
1525
1526 void TraceLog::SetEventCallbackEnabled(const CategoryFilter& category_filter,
1527                                        EventCallback cb) {
1528   AutoLock lock(lock_);
1529   subtle::NoBarrier_Store(&event_callback_,
1530                           reinterpret_cast<subtle::AtomicWord>(cb));
1531   event_callback_category_filter_ = category_filter;
1532   UpdateCategoryGroupEnabledFlags();
1533 }
1534
1535 void TraceLog::SetEventCallbackDisabled() {
1536   AutoLock lock(lock_);
1537   subtle::NoBarrier_Store(&event_callback_, 0);
1538   UpdateCategoryGroupEnabledFlags();
1539 }
1540
1541 // Flush() works as follows:
1542 // 1. Flush() is called on threadA, whose message loop is saved in
1543 //    flush_message_loop_proxy_;
1544 // 2. If thread_message_loops_ is not empty, threadA posts a task to each
1545 //    message loop to flush the thread local buffers; otherwise finish the flush;
1546 // 3. FlushCurrentThread() deletes the thread local event buffer:
1547 //    - The last batch of events of the thread is flushed into the main buffer;
1548 //    - The message loop will be removed from thread_message_loops_;
1549 //    If this is the last message loop, finish the flush;
1550 // 4. If any thread hasn't finished its flush in time, finish the flush.
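//
// An illustrative caller-side sketch (not part of this file; the callback
// name is hypothetical). Tracing must already be disabled before flushing,
// and the output callback may run several times, with has_more_events ==
// false on the final invocation:
//
//   void OnTraceDataCollected(
//       const scoped_refptr<RefCountedString>& json_events,
//       bool has_more_events) {
//     // Accumulate json_events->data(); done when has_more_events is false.
//   }
//
//   TraceLog* trace_log = TraceLog::GetInstance();
//   trace_log->SetDisabled();
//   trace_log->Flush(Bind(&OnTraceDataCollected));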
1551 void TraceLog::Flush(const TraceLog::OutputCallback& cb) {
1552   if (IsEnabled()) {
1553     // Can't flush when tracing is enabled because otherwise PostTask would
1554     // - generate more trace events;
1555     // - deschedule the calling thread on some platforms causing inaccurate
1556     //   timing of the trace events.
1557     scoped_refptr<RefCountedString> empty_result = new RefCountedString;
1558     if (!cb.is_null())
1559       cb.Run(empty_result, false);
1560     LOG(WARNING) << "Ignored TraceLog::Flush called when tracing is enabled";
1561     return;
1562   }
1563
1564   int generation = this->generation();
1565   {
1566     AutoLock lock(lock_);
1567     DCHECK(!flush_message_loop_proxy_.get());
1568     flush_message_loop_proxy_ = MessageLoopProxy::current();
1569     DCHECK(!thread_message_loops_.size() || flush_message_loop_proxy_.get());
1570     flush_output_callback_ = cb;
1571
1572     if (thread_shared_chunk_) {
1573       logged_events_->ReturnChunk(thread_shared_chunk_index_,
1574                                   thread_shared_chunk_.Pass());
1575     }
1576
1577     if (thread_message_loops_.size()) {
1578       for (hash_set<MessageLoop*>::const_iterator it =
1579            thread_message_loops_.begin();
1580            it != thread_message_loops_.end(); ++it) {
1581         (*it)->PostTask(
1582             FROM_HERE,
1583             Bind(&TraceLog::FlushCurrentThread, Unretained(this), generation));
1584       }
1585       flush_message_loop_proxy_->PostDelayedTask(
1586           FROM_HERE,
1587           Bind(&TraceLog::OnFlushTimeout, Unretained(this), generation),
1588           TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs));
1589       return;
1590     }
1591   }
1592
1593   FinishFlush(generation);
1594 }
1595
1596 void TraceLog::ConvertTraceEventsToTraceFormat(
1597     scoped_ptr<TraceBuffer> logged_events,
1598     const TraceLog::OutputCallback& flush_output_callback) {
1599
1600   if (flush_output_callback.is_null())
1601     return;
1602
1603   // The callback needs to be called at least once even if there are no events
1604   // to let the caller know that the flush has completed.
1605   bool has_more_events = true;
1606   do {
1607     scoped_refptr<RefCountedString> json_events_str_ptr =
1608         new RefCountedString();
1609
1610     for (size_t i = 0; i < kTraceEventBatchChunks; ++i) {
1611       const TraceBufferChunk* chunk = logged_events->NextChunk();
1612       if (!chunk) {
1613         has_more_events = false;
1614         break;
1615       }
1616       for (size_t j = 0; j < chunk->size(); ++j) {
1617         if (i > 0 || j > 0)
1618           json_events_str_ptr->data().append(",");
1619         chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data()));
1620       }
1621     }
1622
1623     flush_output_callback.Run(json_events_str_ptr, has_more_events);
1624   } while (has_more_events);
1625 }
1626
1627 void TraceLog::FinishFlush(int generation) {
1628   scoped_ptr<TraceBuffer> previous_logged_events;
1629   OutputCallback flush_output_callback;
1630
1631   if (!CheckGeneration(generation))
1632     return;
1633
1634   {
1635     AutoLock lock(lock_);
1636
1637     previous_logged_events.swap(logged_events_);
1638     UseNextTraceBuffer();
1639     thread_message_loops_.clear();
1640
1641     flush_message_loop_proxy_ = NULL;
1642     flush_output_callback = flush_output_callback_;
1643     flush_output_callback_.Reset();
1644   }
1645
1646   ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
1647                                   flush_output_callback);
1648 }
1649
1650 // Run in each thread holding a local event buffer.
1651 void TraceLog::FlushCurrentThread(int generation) {
1652   {
1653     AutoLock lock(lock_);
1654     if (!CheckGeneration(generation) || !flush_message_loop_proxy_) {
1655       // This is late. The corresponding flush has finished.
1656       return;
1657     }
1658   }
1659
1660   // This will flush the thread local buffer.
1661   delete thread_local_event_buffer_.Get();
1662
1663   AutoLock lock(lock_);
1664   if (!CheckGeneration(generation) || !flush_message_loop_proxy_ ||
1665       thread_message_loops_.size())
1666     return;
1667
1668   flush_message_loop_proxy_->PostTask(
1669       FROM_HERE,
1670       Bind(&TraceLog::FinishFlush, Unretained(this), generation));
1671 }
1672
1673 void TraceLog::OnFlushTimeout(int generation) {
1674   {
1675     AutoLock lock(lock_);
1676     if (!CheckGeneration(generation) || !flush_message_loop_proxy_) {
1677       // Flush has finished before timeout.
1678       return;
1679     }
1680
1681     LOG(WARNING) <<
1682         "The following threads haven't finished flush in time. "
1683         "If this happens stably for some thread, please call "
1684         "TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop() from "
1685         "the thread to avoid its trace events from being lost.";
1686     for (hash_set<MessageLoop*>::const_iterator it =
1687          thread_message_loops_.begin();
1688          it != thread_message_loops_.end(); ++it) {
1689       LOG(WARNING) << "Thread: " << (*it)->thread_name();
1690     }
1691   }
1692   FinishFlush(generation);
1693 }
1694
1695 void TraceLog::FlushButLeaveBufferIntact(
1696     const TraceLog::OutputCallback& flush_output_callback) {
1697   scoped_ptr<TraceBuffer> previous_logged_events;
1698   {
1699     AutoLock lock(lock_);
1700     AddMetadataEventsWhileLocked();
1701     if (thread_shared_chunk_) {
1702       // Return the chunk to the main buffer to flush the sampling data.
1703       logged_events_->ReturnChunk(thread_shared_chunk_index_,
1704                                   thread_shared_chunk_.Pass());
1705     }
1706     previous_logged_events = logged_events_->CloneForIteration().Pass();
1707   }  // release lock
1708
1709   ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
1710                                   flush_output_callback);
1711 }
1712
1713 void TraceLog::UseNextTraceBuffer() {
1714   logged_events_.reset(CreateTraceBuffer());
1715   subtle::NoBarrier_AtomicIncrement(&generation_, 1);
1716   thread_shared_chunk_.reset();
1717   thread_shared_chunk_index_ = 0;
1718 }
1719
1720 TraceEventHandle TraceLog::AddTraceEvent(
1721     char phase,
1722     const unsigned char* category_group_enabled,
1723     const char* name,
1724     unsigned long long id,
1725     int num_args,
1726     const char** arg_names,
1727     const unsigned char* arg_types,
1728     const unsigned long long* arg_values,
1729     const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
1730     unsigned char flags) {
1731   int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
1732   base::TimeTicks now = base::TimeTicks::NowFromSystemTraceTime();
1733   return AddTraceEventWithThreadIdAndTimestamp(phase, category_group_enabled,
1734                                                name, id, thread_id, now,
1735                                                num_args, arg_names,
1736                                                arg_types, arg_values,
1737                                                convertable_values, flags);
1738 }
1739
1740 TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
1741     char phase,
1742     const unsigned char* category_group_enabled,
1743     const char* name,
1744     unsigned long long id,
1745     int thread_id,
1746     const TimeTicks& timestamp,
1747     int num_args,
1748     const char** arg_names,
1749     const unsigned char* arg_types,
1750     const unsigned long long* arg_values,
1751     const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
1752     unsigned char flags) {
1753   TraceEventHandle handle = { 0, 0, 0 };
1754   if (!*category_group_enabled)
1755     return handle;
1756
1757   // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
1758   // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
1759   // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
1760   if (thread_is_in_trace_event_.Get())
1761     return handle;
1762
1763   AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);
1764
1765   DCHECK(name);
1766
1767   if (flags & TRACE_EVENT_FLAG_MANGLE_ID)
1768     id ^= process_id_hash_;
1769
1770   TimeTicks now = OffsetTimestamp(timestamp);
1771   TimeTicks thread_now = ThreadNow();
1772
1773   ThreadLocalEventBuffer* thread_local_event_buffer = NULL;
1774   // A ThreadLocalEventBuffer needs the message loop
1775   // - to know when the thread exits;
1776   // - to handle the final flush.
1777   // For a thread without a message loop, or whose message loop may be blocked,
1778   // the trace events will be added into the main buffer directly.
1779   if (!thread_blocks_message_loop_.Get() && MessageLoop::current()) {
1780     thread_local_event_buffer = thread_local_event_buffer_.Get();
1781     if (thread_local_event_buffer &&
1782         !CheckGeneration(thread_local_event_buffer->generation())) {
1783       delete thread_local_event_buffer;
1784       thread_local_event_buffer = NULL;
1785     }
1786     if (!thread_local_event_buffer) {
1787       thread_local_event_buffer = new ThreadLocalEventBuffer(this);
1788       thread_local_event_buffer_.Set(thread_local_event_buffer);
1789     }
1790   }
1791
1792   // Check and update the current thread name only if the event is for the
1793   // current thread to avoid locks in most cases.
1794   if (thread_id == static_cast<int>(PlatformThread::CurrentId())) {
1795     const char* new_name = ThreadIdNameManager::GetInstance()->
1796         GetName(thread_id);
1797     // Check if the thread name has been set or changed since the previous
1798     // call (if any), but don't bother if the new name is empty. Note this will
1799     // not detect a thread name change within the same char* buffer address: we
1800     // favor common case performance over corner case correctness.
1801     if (new_name != g_current_thread_name.Get().Get() &&
1802         new_name && *new_name) {
1803       g_current_thread_name.Get().Set(new_name);
1804
1805       AutoLock thread_info_lock(thread_info_lock_);
1806
1807       hash_map<int, std::string>::iterator existing_name =
1808           thread_names_.find(thread_id);
1809       if (existing_name == thread_names_.end()) {
1810         // This is a new thread id, and a new name.
1811         thread_names_[thread_id] = new_name;
1812       } else {
1813         // This is a thread id that we've seen before, but potentially with a
1814         // new name.
1815         std::vector<StringPiece> existing_names;
1816         Tokenize(existing_name->second, ",", &existing_names);
1817         bool found = std::find(existing_names.begin(),
1818                                existing_names.end(),
1819                                new_name) != existing_names.end();
1820         if (!found) {
1821           if (existing_names.size())
1822             existing_name->second.push_back(',');
1823           existing_name->second.append(new_name);
1824         }
1825       }
1826     }
1827   }
1828
1829   std::string console_message;
1830   if (*category_group_enabled &
1831       (ENABLED_FOR_RECORDING | ENABLED_FOR_MONITORING)) {
1832     OptionalAutoLock lock(lock_);
1833
1834     TraceEvent* trace_event = NULL;
1835     if (thread_local_event_buffer) {
1836       trace_event = thread_local_event_buffer->AddTraceEvent(&handle);
1837     } else {
1838       lock.EnsureAcquired();
1839       trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true);
1840     }
1841
1842     if (trace_event) {
1843       trace_event->Initialize(thread_id, now, thread_now, phase,
1844                               category_group_enabled, name, id,
1845                               num_args, arg_names, arg_types, arg_values,
1846                               convertable_values, flags);
1847
1848 #if defined(OS_ANDROID)
1849       trace_event->SendToATrace();
1850 #endif
1851     }
1852
1853     if (trace_options() & ECHO_TO_CONSOLE) {
1854       console_message = EventToConsoleMessage(
1855           phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
1856           timestamp, trace_event);
1857     }
1858   }
1859
1860   if (console_message.size())
1861     LOG(ERROR) << console_message;
1862
1863   if (reinterpret_cast<const unsigned char*>(subtle::NoBarrier_Load(
1864       &watch_category_)) == category_group_enabled) {
1865     bool event_name_matches;
1866     WatchEventCallback watch_event_callback_copy;
1867     {
1868       AutoLock lock(lock_);
1869       event_name_matches = watch_event_name_ == name;
1870       watch_event_callback_copy = watch_event_callback_;
1871     }
1872     if (event_name_matches) {
1873       if (!watch_event_callback_copy.is_null())
1874         watch_event_callback_copy.Run();
1875     }
1876   }
1877
1878   if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
1879     EventCallback event_callback = reinterpret_cast<EventCallback>(
1880         subtle::NoBarrier_Load(&event_callback_));
1881     if (event_callback) {
1882       event_callback(now,
1883                      phase == TRACE_EVENT_PHASE_COMPLETE ?
1884                          TRACE_EVENT_PHASE_BEGIN : phase,
1885                      category_group_enabled, name, id,
1886                      num_args, arg_names, arg_types, arg_values,
1887                      flags);
1888     }
1889   }
1890
1891   if (thread_local_event_buffer)
1892     thread_local_event_buffer->ReportOverhead(now, thread_now);
1893
1894   return handle;
1895 }
1896
1897 // May be called when a COMPLETE event ends and the unfinished event has been
1898 // recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL).
1899 std::string TraceLog::EventToConsoleMessage(unsigned char phase,
1900                                             const TimeTicks& timestamp,
1901                                             TraceEvent* trace_event) {
1902   AutoLock thread_info_lock(thread_info_lock_);
1903
1904   // The caller should translate TRACE_EVENT_PHASE_COMPLETE to
1905   // TRACE_EVENT_PHASE_BEGIN or TRACE_EVENT_PHASE_END.
1906   DCHECK(phase != TRACE_EVENT_PHASE_COMPLETE);
1907
1908   TimeDelta duration;
1909   int thread_id = trace_event ?
1910       trace_event->thread_id() : PlatformThread::CurrentId();
1911   if (phase == TRACE_EVENT_PHASE_END) {
1912     duration = timestamp - thread_event_start_times_[thread_id].top();
1913     thread_event_start_times_[thread_id].pop();
1914   }
1915
1916   std::string thread_name = thread_names_[thread_id];
1917   if (thread_colors_.find(thread_name) == thread_colors_.end())
1918     thread_colors_[thread_name] = (thread_colors_.size() % 6) + 1;
1919
1920   std::ostringstream log;
1921   log << base::StringPrintf("%s: \x1b[0;3%dm",
1922                             thread_name.c_str(),
1923                             thread_colors_[thread_name]);
1924
1925   size_t depth = 0;
1926   if (thread_event_start_times_.find(thread_id) !=
1927       thread_event_start_times_.end())
1928     depth = thread_event_start_times_[thread_id].size();
1929
1930   for (size_t i = 0; i < depth; ++i)
1931     log << "| ";
1932
1933   if (trace_event)
1934     trace_event->AppendPrettyPrinted(&log);
1935   if (phase == TRACE_EVENT_PHASE_END)
1936     log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());
1937
1938   log << "\x1b[0;m";
1939
1940   if (phase == TRACE_EVENT_PHASE_BEGIN)
1941     thread_event_start_times_[thread_id].push(timestamp);
1942
1943   return log.str();
1944 }
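
// Illustrative shape of a console message produced above (thread and event
// names are made up; the escape sequences select one of six terminal colors):
//
//   CrBrowserMain: | | MessageLoop::RunTask (0.123 ms)
//
// Each "| " indicates one level of nesting of unfinished BEGIN events on the
// thread; the duration suffix appears only for END events.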
1945
1946 void TraceLog::AddTraceEventEtw(char phase,
1947                                 const char* name,
1948                                 const void* id,
1949                                 const char* extra) {
1950 #if defined(OS_WIN)
1951   TraceEventETWProvider::Trace(name, phase, id, extra);
1952 #endif
1953   INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
1954                            TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
1955 }
1956
1957 void TraceLog::AddTraceEventEtw(char phase,
1958                                 const char* name,
1959                                 const void* id,
1960                                 const std::string& extra) {
1961 #if defined(OS_WIN)
1962   TraceEventETWProvider::Trace(name, phase, id, extra);
1963 #endif
1964   INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
1965                            TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
1966 }
1967
1968 void TraceLog::UpdateTraceEventDuration(
1969     const unsigned char* category_group_enabled,
1970     const char* name,
1971     TraceEventHandle handle) {
1972   // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
1973   // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
1974   // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
1975   if (thread_is_in_trace_event_.Get())
1976     return;
1977
1978   AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);
1979
1980   TimeTicks thread_now = ThreadNow();
1981   TimeTicks now = OffsetNow();
1982
1983   std::string console_message;
1984   if (*category_group_enabled & ENABLED_FOR_RECORDING) {
1985     OptionalAutoLock lock(lock_);
1986
1987     TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock);
1988     if (trace_event) {
1989       DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE);
1990       trace_event->UpdateDuration(now, thread_now);
1991 #if defined(OS_ANDROID)
1992       trace_event->SendToATrace();
1993 #endif
1994     }
1995
1996     if (trace_options() & ECHO_TO_CONSOLE) {
1997       console_message = EventToConsoleMessage(TRACE_EVENT_PHASE_END,
1998                                               now, trace_event);
1999     }
2000   }
2001
2002   if (console_message.size())
2003     LOG(ERROR) << console_message;
2004
2005   if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
2006     EventCallback event_callback = reinterpret_cast<EventCallback>(
2007         subtle::NoBarrier_Load(&event_callback_));
2008     if (event_callback) {
2009       event_callback(now, TRACE_EVENT_PHASE_END, category_group_enabled, name,
2010                      trace_event_internal::kNoEventId, 0, NULL, NULL, NULL,
2011                      TRACE_EVENT_FLAG_NONE);
2012     }
2013   }
2014 }
2015
2016 void TraceLog::SetWatchEvent(const std::string& category_name,
2017                              const std::string& event_name,
2018                              const WatchEventCallback& callback) {
2019   const unsigned char* category = GetCategoryGroupEnabled(
2020       category_name.c_str());
2021   AutoLock lock(lock_);
2022   subtle::NoBarrier_Store(&watch_category_,
2023                           reinterpret_cast<subtle::AtomicWord>(category));
2024   watch_event_name_ = event_name;
2025   watch_event_callback_ = callback;
2026 }
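
// Illustrative use (category and event names are hypothetical): run a
// callback every time a matching event is added while tracing:
//
//   TraceLog::GetInstance()->SetWatchEvent(
//       "cc", "DrawFrame", Bind(&OnDrawFrameTraced));
//   ...
//   TraceLog::GetInstance()->CancelWatchEvent();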
2027
2028 void TraceLog::CancelWatchEvent() {
2029   AutoLock lock(lock_);
2030   subtle::NoBarrier_Store(&watch_category_, 0);
2031   watch_event_name_ = "";
2032   watch_event_callback_.Reset();
2033 }
2034
2035 void TraceLog::AddMetadataEventsWhileLocked() {
2036   lock_.AssertAcquired();
2037
2038   int current_thread_id = static_cast<int>(base::PlatformThread::CurrentId());
2039   if (process_sort_index_ != 0) {
2040     InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
2041                             current_thread_id,
2042                             "process_sort_index", "sort_index",
2043                             process_sort_index_);
2044   }
2045
2046   if (process_name_.size()) {
2047     InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
2048                             current_thread_id,
2049                             "process_name", "name",
2050                             process_name_);
2051   }
2052
2053   if (process_labels_.size() > 0) {
2054     std::vector<std::string> labels;
2055     for (base::hash_map<int, std::string>::iterator it = process_labels_.begin();
2056         it != process_labels_.end();
2057         it++) {
2058       labels.push_back(it->second);
2059     }
2060     InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
2061                             current_thread_id,
2062                             "process_labels", "labels",
2063                             JoinString(labels, ','));
2064   }
2065
2066   // Thread sort indices.
2067   for (hash_map<int, int>::iterator it = thread_sort_indices_.begin();
2068       it != thread_sort_indices_.end();
2069       it++) {
2070     if (it->second == 0)
2071       continue;
2072     InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
2073                             it->first,
2074                             "thread_sort_index", "sort_index",
2075                             it->second);
2076   }
2077
2078   // Thread names.
2079   AutoLock thread_info_lock(thread_info_lock_);
2080   for (hash_map<int, std::string>::iterator it = thread_names_.begin();
2081       it != thread_names_.end();
2082       it++) {
2083     if (it->second.empty())
2084       continue;
2085     InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
2086                             it->first,
2087                             "thread_name", "name",
2088                             it->second);
2089   }
2090 }
2091
2092 void TraceLog::WaitSamplingEventForTesting() {
2093   if (!sampling_thread_)
2094     return;
2095   sampling_thread_->WaitSamplingEventForTesting();
2096 }
2097
2098 void TraceLog::DeleteForTesting() {
2099   DeleteTraceLogForTesting::Delete();
2100 }
2101
2102 TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
2103   return GetEventByHandleInternal(handle, NULL);
2104 }
2105
2106 TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle,
2107                                                OptionalAutoLock* lock) {
2108   if (!handle.chunk_seq)
2109     return NULL;
2110
2111   if (thread_local_event_buffer_.Get()) {
2112     TraceEvent* trace_event =
2113         thread_local_event_buffer_.Get()->GetEventByHandle(handle);
2114     if (trace_event)
2115       return trace_event;
2116   }
2117
2118   // The event is no longer under the control of the thread local buffer.
2119   // Try to get the event from the main buffer with a lock.
2120   if (lock)
2121     lock->EnsureAcquired();
2122
2123   if (thread_shared_chunk_ &&
2124       handle.chunk_index == thread_shared_chunk_index_) {
2125     return handle.chunk_seq == thread_shared_chunk_->seq() ?
2126         thread_shared_chunk_->GetEventAt(handle.event_index) : NULL;
2127   }
2128
2129   return logged_events_->GetEventByHandle(handle);
2130 }
2131
2132 void TraceLog::SetProcessID(int process_id) {
2133   process_id_ = process_id;
2134   // Create a FNV hash from the process ID for XORing.
2135   // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
2136   unsigned long long offset_basis = 14695981039346656037ull;
2137   unsigned long long fnv_prime = 1099511628211ull;
2138   unsigned long long pid = static_cast<unsigned long long>(process_id_);
2139   process_id_hash_ = (offset_basis ^ pid) * fnv_prime;
2140 }
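
// The mixing above is a single round of an FNV-style hash (xor with the
// offset basis, then multiply by the prime), applied to the whole pid at
// once rather than byte by byte. AddTraceEventWithThreadIdAndTimestamp then
// XORs this hash into ids carrying TRACE_EVENT_FLAG_MANGLE_ID, i.e.
// (illustrative):
//
//   unsigned long long mangled_id = raw_id ^ process_id_hash_;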
2141
2142 void TraceLog::SetProcessSortIndex(int sort_index) {
2143   AutoLock lock(lock_);
2144   process_sort_index_ = sort_index;
2145 }
2146
2147 void TraceLog::SetProcessName(const std::string& process_name) {
2148   AutoLock lock(lock_);
2149   process_name_ = process_name;
2150 }
2151
2152 void TraceLog::UpdateProcessLabel(
2153     int label_id, const std::string& current_label) {
2154   if (!current_label.length())
2155     return RemoveProcessLabel(label_id);
2156
2157   AutoLock lock(lock_);
2158   process_labels_[label_id] = current_label;
2159 }
2160
2161 void TraceLog::RemoveProcessLabel(int label_id) {
2162   AutoLock lock(lock_);
2163   base::hash_map<int, std::string>::iterator it = process_labels_.find(
2164       label_id);
2165   if (it == process_labels_.end())
2166     return;
2167
2168   process_labels_.erase(it);
2169 }
2170
2171 void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) {
2172   AutoLock lock(lock_);
2173   thread_sort_indices_[static_cast<int>(thread_id)] = sort_index;
2174 }
2175
2176 void TraceLog::SetTimeOffset(TimeDelta offset) {
2177   time_offset_ = offset;
2178 }
2179
2180 size_t TraceLog::GetObserverCountForTest() const {
2181   return enabled_state_observer_list_.size();
2182 }
2183
2184 void TraceLog::SetCurrentThreadBlocksMessageLoop() {
2185   thread_blocks_message_loop_.Set(true);
2186   if (thread_local_event_buffer_.Get()) {
2187     // This will flush the thread local buffer.
2188     delete thread_local_event_buffer_.Get();
2189   }
2190 }
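
// Illustrative use: a thread whose message loop can stay blocked for long
// stretches (and therefore may not run the flush task in time, see
// OnFlushTimeout) can opt out of the thread local buffer so that its events
// go straight to the main buffer:
//
//   TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop();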
2191
2192 bool CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
2193     const std::string& str) {
2194   return str.empty() ||
2195          str.at(0) == ' ' ||
2196          str.at(str.length() - 1) == ' ';
2197 }
2198
2199 bool CategoryFilter::DoesCategoryGroupContainCategory(
2200     const char* category_group,
2201     const char* category) const {
2202   DCHECK(category);
2203   CStringTokenizer category_group_tokens(category_group,
2204                           category_group + strlen(category_group), ",");
2205   while (category_group_tokens.GetNext()) {
2206     std::string category_group_token = category_group_tokens.token();
2207     // Don't allow empty tokens, nor tokens with leading or trailing space.
2208     DCHECK(!CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
2209         category_group_token))
2210         << "Disallowed category string";
2211     if (MatchPattern(category_group_token.c_str(), category))
2212       return true;
2213   }
2214   return false;
2215 }
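
// For example (illustrative), with
// category_group == "cc,disabled-by-default-cc.debug":
//   DoesCategoryGroupContainCategory(category_group, "cc")   --> true
//   DoesCategoryGroupContainCategory(category_group, "gpu")  --> false
// MatchPattern also accepts '*' wildcards in the filter entry, so an entry
// such as "disabled-by-default-*" matches the second token above.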
2216
2217 CategoryFilter::CategoryFilter(const std::string& filter_string) {
2218   if (!filter_string.empty())
2219     Initialize(filter_string);
2220   else
2221     Initialize(CategoryFilter::kDefaultCategoryFilterString);
2222 }
2223
2224 CategoryFilter::CategoryFilter(const CategoryFilter& cf)
2225     : included_(cf.included_),
2226       disabled_(cf.disabled_),
2227       excluded_(cf.excluded_),
2228       delays_(cf.delays_) {
2229 }
2230
2231 CategoryFilter::~CategoryFilter() {
2232 }
2233
2234 CategoryFilter& CategoryFilter::operator=(const CategoryFilter& rhs) {
2235   if (this == &rhs)
2236     return *this;
2237
2238   included_ = rhs.included_;
2239   disabled_ = rhs.disabled_;
2240   excluded_ = rhs.excluded_;
2241   delays_ = rhs.delays_;
2242   return *this;
2243 }
2244
2245 void CategoryFilter::Initialize(const std::string& filter_string) {
2246   // Tokenize list of categories, delimited by ','.
2247   StringTokenizer tokens(filter_string, ",");
2248   // Add each token to the appropriate list (included_,excluded_).
2249   while (tokens.GetNext()) {
2250     std::string category = tokens.token();
2251     // Ignore empty categories.
2252     if (category.empty())
2253       continue;
2254     // Synthetic delays are of the form 'DELAY(delay;option;option;...)'.
2255     if (category.find(kSyntheticDelayCategoryFilterPrefix) == 0 &&
2256         category.at(category.size() - 1) == ')') {
2257       category = category.substr(
2258           strlen(kSyntheticDelayCategoryFilterPrefix),
2259           category.size() - strlen(kSyntheticDelayCategoryFilterPrefix) - 1);
2260       size_t name_length = category.find(';');
2261       if (name_length != std::string::npos && name_length > 0 &&
2262           name_length != category.size() - 1) {
2263         delays_.push_back(category);
2264       }
2265     } else if (category.at(0) == '-') {
2266       // Excluded categories start with '-'.
2267       // Remove '-' from category string.
2268       category = category.substr(1);
2269       excluded_.push_back(category);
2270     } else if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
2271                                 TRACE_DISABLED_BY_DEFAULT("")) == 0) {
2272       disabled_.push_back(category);
2273     } else {
2274       included_.push_back(category);
2275     }
2276   }
2277 }
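
// Illustrative example (assuming TRACE_DISABLED_BY_DEFAULT("") expands to the
// "disabled-by-default-" prefix): initializing with
//
//   "cc,-ipc,disabled-by-default-memory,DELAY(gpu.PresentingFrame;16)"
//
// yields included_ = {"cc"}, excluded_ = {"ipc"},
// disabled_ = {"disabled-by-default-memory"} and
// delays_ = {"gpu.PresentingFrame;16"}.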
2278
2279 void CategoryFilter::WriteString(const StringList& values,
2280                                  std::string* out,
2281                                  bool included) const {
2282   bool prepend_comma = !out->empty();
2283   int token_cnt = 0;
2284   for (StringList::const_iterator ci = values.begin();
2285        ci != values.end(); ++ci) {
2286     if (token_cnt > 0 || prepend_comma)
2287       StringAppendF(out, ",");
2288     StringAppendF(out, "%s%s", (included ? "" : "-"), ci->c_str());
2289     ++token_cnt;
2290   }
2291 }
2292
2293 void CategoryFilter::WriteString(const StringList& delays,
2294                                  std::string* out) const {
2295   bool prepend_comma = !out->empty();
2296   int token_cnt = 0;
2297   for (StringList::const_iterator ci = delays.begin();
2298        ci != delays.end(); ++ci) {
2299     if (token_cnt > 0 || prepend_comma)
2300       StringAppendF(out, ",");
2301     StringAppendF(out, "%s%s)", kSyntheticDelayCategoryFilterPrefix,
2302                   ci->c_str());
2303     ++token_cnt;
2304   }
2305 }
2306
2307 std::string CategoryFilter::ToString() const {
2308   std::string filter_string;
2309   WriteString(included_, &filter_string, true);
2310   WriteString(disabled_, &filter_string, true);
2311   WriteString(excluded_, &filter_string, false);
2312   WriteString(delays_, &filter_string);
2313   return filter_string;
2314 }
2315
2316 bool CategoryFilter::IsCategoryGroupEnabled(
2317     const char* category_group_name) const {
2318   // TraceLog should call this method only as part of enabling/disabling
2319   // categories.
2320   StringList::const_iterator ci;
2321
2322   // Check the disabled- filters and the disabled-* wildcard first so that a
2323   // "*" filter does not include the disabled.
2324   for (ci = disabled_.begin(); ci != disabled_.end(); ++ci) {
2325     if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
2326       return true;
2327   }
2328   if (DoesCategoryGroupContainCategory(category_group_name,
2329                                        TRACE_DISABLED_BY_DEFAULT("*")))
2330     return false;
2331
2332   for (ci = included_.begin(); ci != included_.end(); ++ci) {
2333     if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
2334       return true;
2335   }
2336
2337   for (ci = excluded_.begin(); ci != excluded_.end(); ++ci) {
2338     if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
2339       return false;
2340   }
2341   // If the category group is not excluded, and there are no included patterns,
2342   // we consider this category group enabled.
2343   return included_.empty();
2344 }
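
// For example (illustrative), with a filter built from "gpu,-ipc":
//   IsCategoryGroupEnabled("gpu")  --> true   (matches an included pattern)
//   IsCategoryGroupEnabled("ipc")  --> false  (excluded)
//   IsCategoryGroupEnabled("cc")   --> false  (included_ is non-empty and
//                                              "cc" matches nothing)
// With "-ipc" alone, "cc" would be enabled: included_ is empty and "cc" is
// not excluded.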
2345
2346 bool CategoryFilter::HasIncludedPatterns() const {
2347   return !included_.empty();
2348 }
2349
2350 void CategoryFilter::Merge(const CategoryFilter& nested_filter) {
2351   // Keep included patterns only if both filters have an included entry.
2352   // Otherwise, one of the filters was specifying "*" and we want to honor the
2353   // broadest filter.
2354   if (HasIncludedPatterns() && nested_filter.HasIncludedPatterns()) {
2355     included_.insert(included_.end(),
2356                      nested_filter.included_.begin(),
2357                      nested_filter.included_.end());
2358   } else {
2359     included_.clear();
2360   }
2361
2362   disabled_.insert(disabled_.end(),
2363                    nested_filter.disabled_.begin(),
2364                    nested_filter.disabled_.end());
2365   excluded_.insert(excluded_.end(),
2366                    nested_filter.excluded_.begin(),
2367                    nested_filter.excluded_.end());
2368   delays_.insert(delays_.end(),
2369                  nested_filter.delays_.begin(),
2370                  nested_filter.delays_.end());
2371 }
2372
2373 void CategoryFilter::Clear() {
2374   included_.clear();
2375   disabled_.clear();
2376   excluded_.clear();
2377 }
2378
2379 const CategoryFilter::StringList&
2380     CategoryFilter::GetSyntheticDelayValues() const {
2381   return delays_;
2382 }
2383
2384 }  // namespace debug
2385 }  // namespace base
2386
2387 namespace trace_event_internal {
2388
2389 ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient(
2390     const char* category_group, const char* name) {
2391   // The single atom works because for now the category_group can only be "gpu".
2392   DCHECK(strcmp(category_group, "gpu") == 0);
2393   static TRACE_EVENT_API_ATOMIC_WORD atomic = 0;
2394   INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(
2395       category_group, atomic, category_group_enabled_);
2396   name_ = name;
2397   if (*category_group_enabled_) {
2398     event_handle_ =
2399         TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
2400             TRACE_EVENT_PHASE_COMPLETE, category_group_enabled_, name,
2401             trace_event_internal::kNoEventId,
2402             static_cast<int>(base::PlatformThread::CurrentId()),
2403             base::TimeTicks::NowFromSystemTraceTime(),
2404             0, NULL, NULL, NULL, NULL, TRACE_EVENT_FLAG_NONE);
2405   }
2406 }
2407
2408 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
2409   if (*category_group_enabled_) {
2410     TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_,
2411                                                 name_, event_handle_);
2412   }
2413 }
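
// Illustrative direct use (in practice this scoper is intended to be created
// through a tracing macro rather than by hand): constructing it on the stack
// emits a COMPLETE event covering the enclosing scope, and the category
// group must currently be "gpu" (see the DCHECK in the constructor):
//
//   {
//     trace_event_internal::ScopedTraceBinaryEfficient trace("gpu", "Swap");
//     DoSwap();  // hypothetical work being measured
//   }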
2414
2415 }  // namespace trace_event_internal