1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/debug/trace_event_impl.h"
6
7 #include <algorithm>
8
9 #include "base/base_switches.h"
10 #include "base/bind.h"
11 #include "base/command_line.h"
12 #include "base/debug/leak_annotations.h"
13 #include "base/debug/trace_event.h"
14 #include "base/debug/trace_event_synthetic_delay.h"
15 #include "base/float_util.h"
16 #include "base/format_macros.h"
17 #include "base/json/string_escape.h"
18 #include "base/lazy_instance.h"
19 #include "base/memory/singleton.h"
20 #include "base/message_loop/message_loop.h"
21 #include "base/process/process_metrics.h"
22 #include "base/stl_util.h"
23 #include "base/strings/string_number_conversions.h"
24 #include "base/strings/string_split.h"
25 #include "base/strings/string_tokenizer.h"
26 #include "base/strings/string_util.h"
27 #include "base/strings/stringprintf.h"
28 #include "base/strings/utf_string_conversions.h"
29 #include "base/synchronization/cancellation_flag.h"
30 #include "base/synchronization/waitable_event.h"
31 #include "base/sys_info.h"
32 #include "base/third_party/dynamic_annotations/dynamic_annotations.h"
33 #include "base/threading/platform_thread.h"
34 #include "base/threading/thread_id_name_manager.h"
35 #include "base/time/time.h"
36
37 #if defined(OS_WIN)
38 #include "base/debug/trace_event_win.h"
39 #endif
40
41 class DeleteTraceLogForTesting {
42  public:
43   static void Delete() {
44     Singleton<base::debug::TraceLog,
45               LeakySingletonTraits<base::debug::TraceLog> >::OnExit(0);
46   }
47 };
48
49 // The thread buckets for the sampling profiler.
50 BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
51
52 namespace base {
53 namespace debug {
54
55 namespace {
56
57 // The overhead of TraceEvent above this threshold will be reported in the
58 // trace.
59 const int kOverheadReportThresholdInMicroseconds = 50;
60
61 // Controls the number of trace events we will buffer in-memory
62 // before throwing them away.
63 const size_t kTraceBufferChunkSize = TraceBufferChunk::kTraceBufferChunkSize;
64 const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize;
65 const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4;
66 const size_t kTraceEventBatchChunks = 1000 / kTraceBufferChunkSize;
67 // Can store results for 30 seconds with 1 ms sampling interval.
68 const size_t kMonitorTraceEventBufferChunks = 30000 / kTraceBufferChunkSize;
69 // ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
70 const size_t kEchoToConsoleTraceEventBufferChunks = 256;
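// For a rough sense of scale (illustrative arithmetic only; it assumes the
// kTraceBufferChunkSize value defined in trace_event_impl.h, e.g. 64 events
// per chunk):
//   kTraceEventVectorBufferChunks  = 256000 / 64 = 4000 chunks
//   kTraceEventRingBufferChunks    = 4000 / 4    = 1000 chunks
//   kMonitorTraceEventBufferChunks = 30000 / 64  = 468 chunks (~30000 events,
//       i.e. about 30 seconds at the 1 ms sampling interval)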
71
72 const int kThreadFlushTimeoutMs = 3000;
73
74 // These categories will cause deadlock when ECHO_TO_CONSOLE is enabled. crbug.com/325575.
75 const char kEchoToConsoleCategoryFilter[] = "-ipc,-task";
76
77 const char kSyntheticDelayCategoryFilterPrefix[] = "DELAY(";
78
79 #define MAX_CATEGORY_GROUPS 100
80
81 // Parallel arrays g_category_groups and g_category_group_enabled are separate
82 // so that a pointer to a member of g_category_group_enabled can be easily
83 // converted to an index into g_category_groups. This allows macros to deal
84 // only with char enabled pointers from g_category_group_enabled, and we can
85 // convert internally to determine the category name from the char enabled
86 // pointer.
87 const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
88   "toplevel",
89   "tracing already shutdown",
90   "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
91   "__metadata",
92   // For reporting trace_event overhead. For thread local event buffers only.
93   "trace_event_overhead"};
94
95 // The enabled flag is char instead of bool so that the API can be used from C.
96 unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = { 0 };
97 // Indexes here have to match the g_category_groups array indexes above.
98 const int g_category_already_shutdown = 1;
99 const int g_category_categories_exhausted = 2;
100 const int g_category_metadata = 3;
101 const int g_category_trace_event_overhead = 4;
102 const int g_num_builtin_categories = 5;
103 int g_category_index = g_num_builtin_categories; // Skip default categories.
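// Illustrative sketch of the pointer-to-index conversion described above
// (GetCategoryGroupName() below does exactly this):
//   const unsigned char* enabled = &g_category_group_enabled[i];
//   size_t index = enabled - g_category_group_enabled;
//   const char* group_name = g_category_groups[index];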
104
105 // The name of the current thread. This is used to decide if the current
106 // thread name has changed. We combine all the seen thread names into the
107 // output name for the thread.
108 LazyInstance<ThreadLocalPointer<const char> >::Leaky
109     g_current_thread_name = LAZY_INSTANCE_INITIALIZER;
110
111 TimeTicks ThreadNow() {
112   return TimeTicks::IsThreadNowSupported() ?
113       TimeTicks::ThreadNow() : TimeTicks();
114 }
115
116 class TraceBufferRingBuffer : public TraceBuffer {
117  public:
118   TraceBufferRingBuffer(size_t max_chunks)
119       : max_chunks_(max_chunks),
120         recyclable_chunks_queue_(new size_t[queue_capacity()]),
121         queue_head_(0),
122         queue_tail_(max_chunks),
123         current_iteration_index_(0),
124         current_chunk_seq_(1) {
125     chunks_.reserve(max_chunks);
126     for (size_t i = 0; i < max_chunks; ++i)
127       recyclable_chunks_queue_[i] = i;
128   }
129
130   virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
131     // Because the number of threads is much less than the number of chunks,
132     // the queue should never be empty.
133     DCHECK(!QueueIsEmpty());
134
135     *index = recyclable_chunks_queue_[queue_head_];
136     queue_head_ = NextQueueIndex(queue_head_);
137     current_iteration_index_ = queue_head_;
138
139     if (*index >= chunks_.size())
140       chunks_.resize(*index + 1);
141
142     TraceBufferChunk* chunk = chunks_[*index];
143     chunks_[*index] = NULL;  // Put NULL in the slot of an in-flight chunk.
144     if (chunk)
145       chunk->Reset(current_chunk_seq_++);
146     else
147       chunk = new TraceBufferChunk(current_chunk_seq_++);
148
149     return scoped_ptr<TraceBufferChunk>(chunk);
150   }
151
152   virtual void ReturnChunk(size_t index,
153                            scoped_ptr<TraceBufferChunk> chunk) OVERRIDE {
154     // When this method is called, the queue should not be full because it
155     // can contain all chunks including the one to be returned.
156     DCHECK(!QueueIsFull());
157     DCHECK(chunk);
158     DCHECK_LT(index, chunks_.size());
159     DCHECK(!chunks_[index]);
160     chunks_[index] = chunk.release();
161     recyclable_chunks_queue_[queue_tail_] = index;
162     queue_tail_ = NextQueueIndex(queue_tail_);
163   }
164
165   virtual bool IsFull() const OVERRIDE {
166     return false;
167   }
168
169   virtual size_t Size() const OVERRIDE {
170     // This is approximate because not all of the chunks are full.
171     return chunks_.size() * kTraceBufferChunkSize;
172   }
173
174   virtual size_t Capacity() const OVERRIDE {
175     return max_chunks_ * kTraceBufferChunkSize;
176   }
177
178   virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
179     if (handle.chunk_index >= chunks_.size())
180       return NULL;
181     TraceBufferChunk* chunk = chunks_[handle.chunk_index];
182     if (!chunk || chunk->seq() != handle.chunk_seq)
183       return NULL;
184     return chunk->GetEventAt(handle.event_index);
185   }
186
187   virtual const TraceBufferChunk* NextChunk() OVERRIDE {
188     if (chunks_.empty())
189       return NULL;
190
191     while (current_iteration_index_ != queue_tail_) {
192       size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_];
193       current_iteration_index_ = NextQueueIndex(current_iteration_index_);
194       if (chunk_index >= chunks_.size()) // Skip uninitialized chunks.
195         continue;
196       DCHECK(chunks_[chunk_index]);
197       return chunks_[chunk_index];
198     }
199     return NULL;
200   }
201
202   virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
203     scoped_ptr<ClonedTraceBuffer> cloned_buffer(new ClonedTraceBuffer());
204     for (size_t queue_index = queue_head_; queue_index != queue_tail_;
205         queue_index = NextQueueIndex(queue_index)) {
206       size_t chunk_index = recyclable_chunks_queue_[queue_index];
207       if (chunk_index >= chunks_.size()) // Skip uninitialized chunks.
208         continue;
209       TraceBufferChunk* chunk = chunks_[chunk_index];
210       cloned_buffer->chunks_.push_back(chunk ? chunk->Clone().release() : NULL);
211     }
212     return cloned_buffer.PassAs<TraceBuffer>();
213   }
214
215  private:
216   class ClonedTraceBuffer : public TraceBuffer {
217    public:
218     ClonedTraceBuffer() : current_iteration_index_(0) {}
219
220     // The only implemented method.
221     virtual const TraceBufferChunk* NextChunk() OVERRIDE {
222       return current_iteration_index_ < chunks_.size() ?
223           chunks_[current_iteration_index_++] : NULL;
224     }
225
226     virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
227       NOTIMPLEMENTED();
228       return scoped_ptr<TraceBufferChunk>();
229     }
230     virtual void ReturnChunk(size_t index,
231                              scoped_ptr<TraceBufferChunk>) OVERRIDE {
232       NOTIMPLEMENTED();
233     }
234     virtual bool IsFull() const OVERRIDE { return false; }
235     virtual size_t Size() const OVERRIDE { return 0; }
236     virtual size_t Capacity() const OVERRIDE { return 0; }
237     virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
238       return NULL;
239     }
240     virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
241       NOTIMPLEMENTED();
242       return scoped_ptr<TraceBuffer>();
243     }
244
245     size_t current_iteration_index_;
246     ScopedVector<TraceBufferChunk> chunks_;
247   };
248
249   bool QueueIsEmpty() const {
250     return queue_head_ == queue_tail_;
251   }
252
253   size_t QueueSize() const {
254     return queue_tail_ > queue_head_ ? queue_tail_ - queue_head_ :
255         queue_tail_ + queue_capacity() - queue_head_;
256   }
257
258   bool QueueIsFull() const {
259     return QueueSize() == queue_capacity() - 1;
260   }
261
262   size_t queue_capacity() const {
263     // One extra slot to help distinguish the full state from the empty state.
264     return max_chunks_ + 1;
265   }
266
267   size_t NextQueueIndex(size_t index) const {
268     index++;
269     if (index >= queue_capacity())
270       index = 0;
271     return index;
272   }
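  // Worked example (illustrative): with max_chunks_ == 4, queue_capacity()
  // is 5 and indices wrap modulo 5. The queue is empty when
  // queue_head_ == queue_tail_ and full when QueueSize() == 4; the spare
  // slot is what lets the two states be distinguished without a separate
  // element count.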
273
274   size_t max_chunks_;
275   ScopedVector<TraceBufferChunk> chunks_;
276
277   scoped_ptr<size_t[]> recyclable_chunks_queue_;
278   size_t queue_head_;
279   size_t queue_tail_;
280
281   size_t current_iteration_index_;
282   uint32 current_chunk_seq_;
283
284   DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer);
285 };
286
287 class TraceBufferVector : public TraceBuffer {
288  public:
289   TraceBufferVector()
290       : in_flight_chunk_count_(0),
291         current_iteration_index_(0) {
292     chunks_.reserve(kTraceEventVectorBufferChunks);
293   }
294
295   virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
296     // This function may be called when adding normal events or indirectly from
297     // AddMetadataEventsWhileLocked(). We cannot DCHECK(!IsFull()) because we
298     // have to add the metadata events and flush thread-local buffers even if
299     // the buffer is full.
300     *index = chunks_.size();
301     chunks_.push_back(NULL);  // Put NULL in the slot of an in-flight chunk.
302     ++in_flight_chunk_count_;
303     // + 1 because zero chunk_seq is not allowed.
304     return scoped_ptr<TraceBufferChunk>(
305         new TraceBufferChunk(static_cast<uint32>(*index) + 1));
306   }
307
308   virtual void ReturnChunk(size_t index,
309                            scoped_ptr<TraceBufferChunk> chunk) OVERRIDE {
310     DCHECK_GT(in_flight_chunk_count_, 0u);
311     DCHECK_LT(index, chunks_.size());
312     DCHECK(!chunks_[index]);
313     --in_flight_chunk_count_;
314     chunks_[index] = chunk.release();
315   }
316
317   virtual bool IsFull() const OVERRIDE {
318     return chunks_.size() >= kTraceEventVectorBufferChunks;
319   }
320
321   virtual size_t Size() const OVERRIDE {
322     // This is approximate because not all of the chunks are full.
323     return chunks_.size() * kTraceBufferChunkSize;
324   }
325
326   virtual size_t Capacity() const OVERRIDE {
327     return kTraceEventVectorBufferChunks * kTraceBufferChunkSize;
328   }
329
330   virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
331     if (handle.chunk_index >= chunks_.size())
332       return NULL;
333     TraceBufferChunk* chunk = chunks_[handle.chunk_index];
334     if (!chunk || chunk->seq() != handle.chunk_seq)
335       return NULL;
336     return chunk->GetEventAt(handle.event_index);
337   }
338
339   virtual const TraceBufferChunk* NextChunk() OVERRIDE {
340     while (current_iteration_index_ < chunks_.size()) {
341       // Skip in-flight chunks.
342       const TraceBufferChunk* chunk = chunks_[current_iteration_index_++];
343       if (chunk)
344         return chunk;
345     }
346     return NULL;
347   }
348
349   virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
350     NOTIMPLEMENTED();
351     return scoped_ptr<TraceBuffer>();
352   }
353
354  private:
355   size_t in_flight_chunk_count_;
356   size_t current_iteration_index_;
357   ScopedVector<TraceBufferChunk> chunks_;
358
359   DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
360 };
361
362 template <typename T>
363 void InitializeMetadataEvent(TraceEvent* trace_event,
364                              int thread_id,
365                              const char* metadata_name, const char* arg_name,
366                              const T& value) {
367   if (!trace_event)
368     return;
369
370   int num_args = 1;
371   unsigned char arg_type;
372   unsigned long long arg_value;
373   ::trace_event_internal::SetTraceValue(value, &arg_type, &arg_value);
374   trace_event->Initialize(thread_id,
375                           TimeTicks(), TimeTicks(), TRACE_EVENT_PHASE_METADATA,
376                           &g_category_group_enabled[g_category_metadata],
377                           metadata_name, ::trace_event_internal::kNoEventId,
378                           num_args, &arg_name, &arg_type, &arg_value, NULL,
379                           TRACE_EVENT_FLAG_NONE);
380 }
381
382 class AutoThreadLocalBoolean {
383  public:
384   explicit AutoThreadLocalBoolean(ThreadLocalBoolean* thread_local_boolean)
385       : thread_local_boolean_(thread_local_boolean) {
386     DCHECK(!thread_local_boolean_->Get());
387     thread_local_boolean_->Set(true);
388   }
389   ~AutoThreadLocalBoolean() {
390     thread_local_boolean_->Set(false);
391   }
392
393  private:
394   ThreadLocalBoolean* thread_local_boolean_;
395   DISALLOW_COPY_AND_ASSIGN(AutoThreadLocalBoolean);
396 };
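// Usage sketch (illustrative; the flag name is hypothetical): a scope that
// must not re-enter trace code can guard itself with a thread-local flag:
//   AutoThreadLocalBoolean thread_is_in_trace_event(
//       &thread_is_in_trace_event_flag_);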
397
398 }  // namespace
399
400 void TraceBufferChunk::Reset(uint32 new_seq) {
401   for (size_t i = 0; i < next_free_; ++i)
402     chunk_[i].Reset();
403   next_free_ = 0;
404   seq_ = new_seq;
405 }
406
407 TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) {
408   DCHECK(!IsFull());
409   *event_index = next_free_++;
410   return &chunk_[*event_index];
411 }
412
413 scoped_ptr<TraceBufferChunk> TraceBufferChunk::Clone() const {
414   scoped_ptr<TraceBufferChunk> cloned_chunk(new TraceBufferChunk(seq_));
415   cloned_chunk->next_free_ = next_free_;
416   for (size_t i = 0; i < next_free_; ++i)
417     cloned_chunk->chunk_[i].CopyFrom(chunk_[i]);
418   return cloned_chunk.Pass();
419 }
420
421 // A helper class that allows the lock to be acquired in the middle of the scope
422 // and releases it at the end of the scope if it was acquired.
423 class TraceLog::OptionalAutoLock {
424  public:
425   explicit OptionalAutoLock(Lock& lock)
426       : lock_(lock),
427         locked_(false) {
428   }
429
430   ~OptionalAutoLock() {
431     if (locked_)
432       lock_.Release();
433   }
434
435   void EnsureAcquired() {
436     if (!locked_) {
437       lock_.Acquire();
438       locked_ = true;
439     }
440   }
441
442  private:
443   Lock& lock_;
444   bool locked_;
445   DISALLOW_COPY_AND_ASSIGN(OptionalAutoLock);
446 };
447
448 // Use this function instead of the TraceEventHandle constructor to keep the
449 // overhead of the ScopedTracer (trace_event.h) constructor to a minimum.
450 void MakeHandle(uint32 chunk_seq, size_t chunk_index, size_t event_index,
451                 TraceEventHandle* handle) {
452   DCHECK(chunk_seq);
453   DCHECK(chunk_index < (1u << 16));
454   DCHECK(event_index < (1u << 16));
455   handle->chunk_seq = chunk_seq;
456   handle->chunk_index = static_cast<uint16>(chunk_index);
457   handle->event_index = static_cast<uint16>(event_index);
458 }
459
460 ////////////////////////////////////////////////////////////////////////////////
461 //
462 // TraceEvent
463 //
464 ////////////////////////////////////////////////////////////////////////////////
465
466 namespace {
467
468 size_t GetAllocLength(const char* str) { return str ? strlen(str) + 1 : 0; }
469
470 // Copies |*member| into |*buffer|, sets |*member| to point to this new
471 // location, and then advances |*buffer| by the amount written.
472 void CopyTraceEventParameter(char** buffer,
473                              const char** member,
474                              const char* end) {
475   if (*member) {
476     size_t written = strlcpy(*buffer, *member, end - *buffer) + 1;
477     DCHECK_LE(static_cast<int>(written), end - *buffer);
478     *member = *buffer;
479     *buffer += written;
480   }
481 }
482
483 }  // namespace
484
485 TraceEvent::TraceEvent()
486     : duration_(TimeDelta::FromInternalValue(-1)),
487       id_(0u),
488       category_group_enabled_(NULL),
489       name_(NULL),
490       thread_id_(0),
491       phase_(TRACE_EVENT_PHASE_BEGIN),
492       flags_(0) {
493   for (int i = 0; i < kTraceMaxNumArgs; ++i)
494     arg_names_[i] = NULL;
495   memset(arg_values_, 0, sizeof(arg_values_));
496 }
497
498 TraceEvent::~TraceEvent() {
499 }
500
501 void TraceEvent::CopyFrom(const TraceEvent& other) {
502   timestamp_ = other.timestamp_;
503   thread_timestamp_ = other.thread_timestamp_;
504   duration_ = other.duration_;
505   id_ = other.id_;
506   category_group_enabled_ = other.category_group_enabled_;
507   name_ = other.name_;
508   thread_id_ = other.thread_id_;
509   phase_ = other.phase_;
510   flags_ = other.flags_;
511   parameter_copy_storage_ = other.parameter_copy_storage_;
512
513   for (int i = 0; i < kTraceMaxNumArgs; ++i) {
514     arg_names_[i] = other.arg_names_[i];
515     arg_types_[i] = other.arg_types_[i];
516     arg_values_[i] = other.arg_values_[i];
517     convertable_values_[i] = other.convertable_values_[i];
518   }
519 }
520
521 void TraceEvent::Initialize(
522     int thread_id,
523     TimeTicks timestamp,
524     TimeTicks thread_timestamp,
525     char phase,
526     const unsigned char* category_group_enabled,
527     const char* name,
528     unsigned long long id,
529     int num_args,
530     const char** arg_names,
531     const unsigned char* arg_types,
532     const unsigned long long* arg_values,
533     const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
534     unsigned char flags) {
535   timestamp_ = timestamp;
536   thread_timestamp_ = thread_timestamp;
537   duration_ = TimeDelta::FromInternalValue(-1);
538   id_ = id;
539   category_group_enabled_ = category_group_enabled;
540   name_ = name;
541   thread_id_ = thread_id;
542   phase_ = phase;
543   flags_ = flags;
544
545   // Clamp num_args since it may have been set by a third_party library.
546   num_args = (num_args > kTraceMaxNumArgs) ? kTraceMaxNumArgs : num_args;
547   int i = 0;
548   for (; i < num_args; ++i) {
549     arg_names_[i] = arg_names[i];
550     arg_types_[i] = arg_types[i];
551
552     if (arg_types[i] == TRACE_VALUE_TYPE_CONVERTABLE)
553       convertable_values_[i] = convertable_values[i];
554     else
555       arg_values_[i].as_uint = arg_values[i];
556   }
557   for (; i < kTraceMaxNumArgs; ++i) {
558     arg_names_[i] = NULL;
559     arg_values_[i].as_uint = 0u;
560     convertable_values_[i] = NULL;
561     arg_types_[i] = TRACE_VALUE_TYPE_UINT;
562   }
563
564   bool copy = !!(flags & TRACE_EVENT_FLAG_COPY);
565   size_t alloc_size = 0;
566   if (copy) {
567     alloc_size += GetAllocLength(name);
568     for (i = 0; i < num_args; ++i) {
569       alloc_size += GetAllocLength(arg_names_[i]);
570       if (arg_types_[i] == TRACE_VALUE_TYPE_STRING)
571         arg_types_[i] = TRACE_VALUE_TYPE_COPY_STRING;
572     }
573   }
574
575   bool arg_is_copy[kTraceMaxNumArgs];
576   for (i = 0; i < num_args; ++i) {
577     // No copying of convertable types; we retain ownership.
578     if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
579       continue;
580
581     // We only take a copy of arg_vals if they are of type COPY_STRING.
582     arg_is_copy[i] = (arg_types_[i] == TRACE_VALUE_TYPE_COPY_STRING);
583     if (arg_is_copy[i])
584       alloc_size += GetAllocLength(arg_values_[i].as_string);
585   }
586
587   if (alloc_size) {
588     parameter_copy_storage_ = new RefCountedString;
589     parameter_copy_storage_->data().resize(alloc_size);
590     char* ptr = string_as_array(&parameter_copy_storage_->data());
591     const char* end = ptr + alloc_size;
592     if (copy) {
593       CopyTraceEventParameter(&ptr, &name_, end);
594       for (i = 0; i < num_args; ++i) {
595         CopyTraceEventParameter(&ptr, &arg_names_[i], end);
596       }
597     }
598     for (i = 0; i < num_args; ++i) {
599       if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
600         continue;
601       if (arg_is_copy[i])
602         CopyTraceEventParameter(&ptr, &arg_values_[i].as_string, end);
603     }
604     DCHECK_EQ(end, ptr) << "Overrun by " << ptr - end;
605   }
606 }
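// Worked example (illustrative) of the copy path above: for an event added
// with TRACE_EVENT_FLAG_COPY, one argument named "url" and a COPY_STRING
// value "https://example.com", the single allocation is sized as
//   GetAllocLength(name) + GetAllocLength("url") +
//       GetAllocLength("https://example.com")
// and name_, arg_names_[0] and arg_values_[0].as_string are all re-pointed
// into that one RefCountedString block.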
607
608 void TraceEvent::Reset() {
609   // Only reset fields that won't be initialized in Initialize(), or that may
610   // hold references to other objects.
611   duration_ = TimeDelta::FromInternalValue(-1);
612   parameter_copy_storage_ = NULL;
613   for (int i = 0; i < kTraceMaxNumArgs; ++i)
614     convertable_values_[i] = NULL;
615 }
616
617 void TraceEvent::UpdateDuration(const TimeTicks& now,
618                                 const TimeTicks& thread_now) {
619   DCHECK(duration_.ToInternalValue() == -1);
620   duration_ = now - timestamp_;
621   thread_duration_ = thread_now - thread_timestamp_;
622 }
623
624 // static
625 void TraceEvent::AppendValueAsJSON(unsigned char type,
626                                    TraceEvent::TraceValue value,
627                                    std::string* out) {
628   switch (type) {
629     case TRACE_VALUE_TYPE_BOOL:
630       *out += value.as_bool ? "true" : "false";
631       break;
632     case TRACE_VALUE_TYPE_UINT:
633       StringAppendF(out, "%" PRIu64, static_cast<uint64>(value.as_uint));
634       break;
635     case TRACE_VALUE_TYPE_INT:
636       StringAppendF(out, "%" PRId64, static_cast<int64>(value.as_int));
637       break;
638     case TRACE_VALUE_TYPE_DOUBLE: {
639       // FIXME: base/json/json_writer.cc is using the same code,
640       //        should be made into a common method.
641       std::string real;
642       double val = value.as_double;
643       if (IsFinite(val)) {
644         real = DoubleToString(val);
645         // Ensure that the number has a .0 if there's no decimal or 'e'.  This
646         // makes sure that when we read the JSON back, it's interpreted as a
647         // real rather than an int.
648         if (real.find('.') == std::string::npos &&
649             real.find('e') == std::string::npos &&
650             real.find('E') == std::string::npos) {
651           real.append(".0");
652         }
653         // The JSON spec requires that non-integer values in the range (-1,1)
654         // have a zero before the decimal point - ".52" is not valid, "0.52" is.
655         if (real[0] == '.') {
656           real.insert(0, "0");
657         } else if (real.length() > 1 && real[0] == '-' && real[1] == '.') {
658           // "-.1" bad "-0.1" good
659           real.insert(1, "0");
660         }
661       } else if (IsNaN(val)) {
662         // The JSON spec doesn't allow NaN and Infinity (since these are
663         // objects in EcmaScript).  Use strings instead.
664         real = "\"NaN\"";
665       } else if (val < 0) {
666         real = "\"-Infinity\"";
667       } else {
668         real = "\"Infinity\"";
669       }
670       StringAppendF(out, "%s", real.c_str());
671       break;
672     }
673     case TRACE_VALUE_TYPE_POINTER:
674       // JSON only supports double and int numbers.
675       // So as not to lose bits from a 64-bit pointer, output as a hex string.
676       StringAppendF(out, "\"0x%" PRIx64 "\"", static_cast<uint64>(
677                                      reinterpret_cast<intptr_t>(
678                                      value.as_pointer)));
679       break;
680     case TRACE_VALUE_TYPE_STRING:
681     case TRACE_VALUE_TYPE_COPY_STRING:
682       EscapeJSONString(value.as_string ? value.as_string : "NULL", true, out);
683       break;
684     default:
685       NOTREACHED() << "Don't know how to print this value";
686       break;
687   }
688 }
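// Illustrative examples of the formatting rules above:
//   TRACE_VALUE_TYPE_DOUBLE   3          -> 3.0          (".0" appended)
//   TRACE_VALUE_TYPE_DOUBLE   0.52       -> 0.52         (leading zero kept)
//   TRACE_VALUE_TYPE_DOUBLE   NaN        -> "NaN"        (quoted string)
//   TRACE_VALUE_TYPE_POINTER  0xdeadbeef -> "0xdeadbeef" (quoted hex string)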
689
690 void TraceEvent::AppendAsJSON(std::string* out) const {
691   int64 time_int64 = timestamp_.ToInternalValue();
692   int process_id = TraceLog::GetInstance()->process_id();
693   // Category group checked at category creation time.
694   DCHECK(!strchr(name_, '"'));
695   StringAppendF(out,
696       "{\"cat\":\"%s\",\"pid\":%i,\"tid\":%i,\"ts\":%" PRId64 ","
697       "\"ph\":\"%c\",\"name\":\"%s\",\"args\":{",
698       TraceLog::GetCategoryGroupName(category_group_enabled_),
699       process_id,
700       thread_id_,
701       time_int64,
702       phase_,
703       name_);
704
705   // Output argument names and values, stopping at the first NULL argument name.
706   for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
707     if (i > 0)
708       *out += ",";
709     *out += "\"";
710     *out += arg_names_[i];
711     *out += "\":";
712
713     if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
714       convertable_values_[i]->AppendAsTraceFormat(out);
715     else
716       AppendValueAsJSON(arg_types_[i], arg_values_[i], out);
717   }
718   *out += "}";
719
720   if (phase_ == TRACE_EVENT_PHASE_COMPLETE) {
721     int64 duration = duration_.ToInternalValue();
722     if (duration != -1)
723       StringAppendF(out, ",\"dur\":%" PRId64, duration);
724     if (!thread_timestamp_.is_null()) {
725       int64 thread_duration = thread_duration_.ToInternalValue();
726       if (thread_duration != -1)
727         StringAppendF(out, ",\"tdur\":%" PRId64, thread_duration);
728     }
729   }
730
731   // Output tts if thread_timestamp is valid.
732   if (!thread_timestamp_.is_null()) {
733     int64 thread_time_int64 = thread_timestamp_.ToInternalValue();
734     StringAppendF(out, ",\"tts\":%" PRId64, thread_time_int64);
735   }
736
737   // If id_ is set, print it out as a hex string so we don't lose any
738   // bits (it might be a 64-bit pointer).
739   if (flags_ & TRACE_EVENT_FLAG_HAS_ID)
740     StringAppendF(out, ",\"id\":\"0x%" PRIx64 "\"", static_cast<uint64>(id_));
741
742   // Instant events also output their scope.
743   if (phase_ == TRACE_EVENT_PHASE_INSTANT) {
744     char scope = '?';
745     switch (flags_ & TRACE_EVENT_FLAG_SCOPE_MASK) {
746       case TRACE_EVENT_SCOPE_GLOBAL:
747         scope = TRACE_EVENT_SCOPE_NAME_GLOBAL;
748         break;
749
750       case TRACE_EVENT_SCOPE_PROCESS:
751         scope = TRACE_EVENT_SCOPE_NAME_PROCESS;
752         break;
753
754       case TRACE_EVENT_SCOPE_THREAD:
755         scope = TRACE_EVENT_SCOPE_NAME_THREAD;
756         break;
757     }
758     StringAppendF(out, ",\"s\":\"%c\"", scope);
759   }
760
761   *out += "}";
762 }
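// Illustrative sample of the JSON emitted above for a COMPLETE event with one
// argument (all field values are made up):
//   {"cat":"toplevel","pid":1234,"tid":5678,"ts":100,"ph":"X","name":"RunTask",
//    "args":{"src":"foo.cc"},"dur":250,"tdur":200,"tts":90}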
763
764 void TraceEvent::AppendPrettyPrinted(std::ostringstream* out) const {
765   *out << name_ << "[";
766   *out << TraceLog::GetCategoryGroupName(category_group_enabled_);
767   *out << "]";
768   if (arg_names_[0]) {
769     *out << ", {";
770     for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
771       if (i > 0)
772         *out << ", ";
773       *out << arg_names_[i] << ":";
774       std::string value_as_text;
775
776       if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
777         convertable_values_[i]->AppendAsTraceFormat(&value_as_text);
778       else
779         AppendValueAsJSON(arg_types_[i], arg_values_[i], &value_as_text);
780
781       *out << value_as_text;
782     }
783     *out << "}";
784   }
785 }
786
787 ////////////////////////////////////////////////////////////////////////////////
788 //
789 // TraceResultBuffer
790 //
791 ////////////////////////////////////////////////////////////////////////////////
792
793 TraceResultBuffer::OutputCallback
794     TraceResultBuffer::SimpleOutput::GetCallback() {
795   return Bind(&SimpleOutput::Append, Unretained(this));
796 }
797
798 void TraceResultBuffer::SimpleOutput::Append(
799     const std::string& json_trace_output) {
800   json_output += json_trace_output;
801 }
802
803 TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {
804 }
805
806 TraceResultBuffer::~TraceResultBuffer() {
807 }
808
809 void TraceResultBuffer::SetOutputCallback(
810     const OutputCallback& json_chunk_callback) {
811   output_callback_ = json_chunk_callback;
812 }
813
814 void TraceResultBuffer::Start() {
815   append_comma_ = false;
816   output_callback_.Run("[");
817 }
818
819 void TraceResultBuffer::AddFragment(const std::string& trace_fragment) {
820   if (append_comma_)
821     output_callback_.Run(",");
822   append_comma_ = true;
823   output_callback_.Run(trace_fragment);
824 }
825
826 void TraceResultBuffer::Finish() {
827   output_callback_.Run("]");
828 }
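// Usage sketch (illustrative):
//   TraceResultBuffer buffer;
//   TraceResultBuffer::SimpleOutput output;
//   buffer.SetOutputCallback(output.GetCallback());
//   buffer.Start();
//   buffer.AddFragment("{\"name\":\"a\"}");
//   buffer.AddFragment("{\"name\":\"b\"}");
//   buffer.Finish();
//   // output.json_output is now "[{\"name\":\"a\"},{\"name\":\"b\"}]".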
829
830 ////////////////////////////////////////////////////////////////////////////////
831 //
832 // TraceSamplingThread
833 //
834 ////////////////////////////////////////////////////////////////////////////////
835 class TraceBucketData;
836 typedef base::Callback<void(TraceBucketData*)> TraceSampleCallback;
837
838 class TraceBucketData {
839  public:
840   TraceBucketData(base::subtle::AtomicWord* bucket,
841                   const char* name,
842                   TraceSampleCallback callback);
843   ~TraceBucketData();
844
845   TRACE_EVENT_API_ATOMIC_WORD* bucket;
846   const char* bucket_name;
847   TraceSampleCallback callback;
848 };
849
850 // This object must be created on the IO thread.
851 class TraceSamplingThread : public PlatformThread::Delegate {
852  public:
853   TraceSamplingThread();
854   virtual ~TraceSamplingThread();
855
856   // Implementation of PlatformThread::Delegate:
857   virtual void ThreadMain() OVERRIDE;
858
859   static void DefaultSamplingCallback(TraceBucketData* bucket_data);
860
861   void Stop();
862   void WaitSamplingEventForTesting();
863
864  private:
865   friend class TraceLog;
866
867   void GetSamples();
868   // Not thread-safe. Once ThreadMain has been called, this can no longer
869   // be called.
870   void RegisterSampleBucket(TRACE_EVENT_API_ATOMIC_WORD* bucket,
871                             const char* const name,
872                             TraceSampleCallback callback);
873   // Splits a combined "category\0name" into the two component parts.
874   static void ExtractCategoryAndName(const char* combined,
875                                      const char** category,
876                                      const char** name);
877   std::vector<TraceBucketData> sample_buckets_;
878   bool thread_running_;
879   CancellationFlag cancellation_flag_;
880   WaitableEvent waitable_event_for_testing_;
881 };
882
883
884 TraceSamplingThread::TraceSamplingThread()
885     : thread_running_(false),
886       waitable_event_for_testing_(false, false) {
887 }
888
889 TraceSamplingThread::~TraceSamplingThread() {
890 }
891
892 void TraceSamplingThread::ThreadMain() {
893   PlatformThread::SetName("Sampling Thread");
894   thread_running_ = true;
895   const int kSamplingFrequencyMicroseconds = 1000;
896   while (!cancellation_flag_.IsSet()) {
897     PlatformThread::Sleep(
898         TimeDelta::FromMicroseconds(kSamplingFrequencyMicroseconds));
899     GetSamples();
900     waitable_event_for_testing_.Signal();
901   }
902 }
903
904 // static
905 void TraceSamplingThread::DefaultSamplingCallback(
906     TraceBucketData* bucket_data) {
907   TRACE_EVENT_API_ATOMIC_WORD category_and_name =
908       TRACE_EVENT_API_ATOMIC_LOAD(*bucket_data->bucket);
909   if (!category_and_name)
910     return;
911   const char* const combined =
912       reinterpret_cast<const char* const>(category_and_name);
913   const char* category_group;
914   const char* name;
915   ExtractCategoryAndName(combined, &category_group, &name);
916   TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_SAMPLE,
917       TraceLog::GetCategoryGroupEnabled(category_group),
918       name, 0, 0, NULL, NULL, NULL, NULL, 0);
919 }
920
921 void TraceSamplingThread::GetSamples() {
922   for (size_t i = 0; i < sample_buckets_.size(); ++i) {
923     TraceBucketData* bucket_data = &sample_buckets_[i];
924     bucket_data->callback.Run(bucket_data);
925   }
926 }
927
928 void TraceSamplingThread::RegisterSampleBucket(
929     TRACE_EVENT_API_ATOMIC_WORD* bucket,
930     const char* const name,
931     TraceSampleCallback callback) {
932   // Access to sample_buckets_ doesn't race with the sampling thread that
933   // uses sample_buckets_, because RegisterSampleBucket is guaranteed to be
934   // called before the sampling thread is created.
935   DCHECK(!thread_running_);
936   sample_buckets_.push_back(TraceBucketData(bucket, name, callback));
937 }
938
939 // static
940 void TraceSamplingThread::ExtractCategoryAndName(const char* combined,
941                                                  const char** category,
942                                                  const char** name) {
943   *category = combined;
944   *name = &combined[strlen(combined) + 1];
945 }
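// Illustrative example of the combined format (the names are made up): for
// the buffer "cc\0Scheduler::BeginFrame", *category points at "cc" and *name
// points just past the embedded NUL, at "Scheduler::BeginFrame".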
946
947 void TraceSamplingThread::Stop() {
948   cancellation_flag_.Set();
949 }
950
951 void TraceSamplingThread::WaitSamplingEventForTesting() {
952   waitable_event_for_testing_.Wait();
953 }
954
955 TraceBucketData::TraceBucketData(base::subtle::AtomicWord* bucket,
956                                  const char* name,
957                                  TraceSampleCallback callback)
958     : bucket(bucket),
959       bucket_name(name),
960       callback(callback) {
961 }
962
963 TraceBucketData::~TraceBucketData() {
964 }
965
966 ////////////////////////////////////////////////////////////////////////////////
967 //
968 // TraceLog
969 //
970 ////////////////////////////////////////////////////////////////////////////////
971
972 class TraceLog::ThreadLocalEventBuffer
973     : public MessageLoop::DestructionObserver {
974  public:
975   ThreadLocalEventBuffer(TraceLog* trace_log);
976   virtual ~ThreadLocalEventBuffer();
977
978   TraceEvent* AddTraceEvent(TraceEventHandle* handle);
979
980   void ReportOverhead(const TimeTicks& event_timestamp,
981                       const TimeTicks& event_thread_timestamp);
982
983   TraceEvent* GetEventByHandle(TraceEventHandle handle) {
984     if (!chunk_ || handle.chunk_seq != chunk_->seq() ||
985         handle.chunk_index != chunk_index_)
986       return NULL;
987
988     return chunk_->GetEventAt(handle.event_index);
989   }
990
991   int generation() const { return generation_; }
992
993  private:
994   // MessageLoop::DestructionObserver
995   virtual void WillDestroyCurrentMessageLoop() OVERRIDE;
996
997   void FlushWhileLocked();
998
999   void CheckThisIsCurrentBuffer() const {
1000     DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
1001   }
1002
1003   // Since TraceLog is a leaky singleton, trace_log_ will always be valid
1004   // as long as the thread exists.
1005   TraceLog* trace_log_;
1006   scoped_ptr<TraceBufferChunk> chunk_;
1007   size_t chunk_index_;
1008   int event_count_;
1009   TimeDelta overhead_;
1010   int generation_;
1011
1012   DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer);
1013 };
1014
1015 TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
1016     : trace_log_(trace_log),
1017       chunk_index_(0),
1018       event_count_(0),
1019       generation_(trace_log->generation()) {
1020   // ThreadLocalEventBuffer is created only if the thread has a message loop, so
1021   // the following message_loop won't be NULL.
1022   MessageLoop* message_loop = MessageLoop::current();
1023   message_loop->AddDestructionObserver(this);
1024
1025   AutoLock lock(trace_log->lock_);
1026   trace_log->thread_message_loops_.insert(message_loop);
1027 }
1028
1029 TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
1030   CheckThisIsCurrentBuffer();
1031   MessageLoop::current()->RemoveDestructionObserver(this);
1032
1033   // Zero event_count_ happens in any of the following cases:
1034   // - no event generated for the thread;
1035   // - the thread has no message loop;
1036   // - trace_event_overhead is disabled.
1037   if (event_count_) {
1038     InitializeMetadataEvent(AddTraceEvent(NULL),
1039                             static_cast<int>(base::PlatformThread::CurrentId()),
1040                             "overhead", "average_overhead",
1041                             overhead_.InMillisecondsF() / event_count_);
1042   }
1043
1044   {
1045     AutoLock lock(trace_log_->lock_);
1046     FlushWhileLocked();
1047     trace_log_->thread_message_loops_.erase(MessageLoop::current());
1048   }
1049   trace_log_->thread_local_event_buffer_.Set(NULL);
1050 }
1051
1052 TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
1053     TraceEventHandle* handle) {
1054   CheckThisIsCurrentBuffer();
1055
1056   if (chunk_ && chunk_->IsFull()) {
1057     AutoLock lock(trace_log_->lock_);
1058     FlushWhileLocked();
1059     chunk_.reset();
1060   }
1061   if (!chunk_) {
1062     AutoLock lock(trace_log_->lock_);
1063     chunk_ = trace_log_->logged_events_->GetChunk(&chunk_index_);
1064     trace_log_->CheckIfBufferIsFullWhileLocked();
1065   }
1066   if (!chunk_)
1067     return NULL;
1068
1069   size_t event_index;
1070   TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index);
1071   if (trace_event && handle)
1072     MakeHandle(chunk_->seq(), chunk_index_, event_index, handle);
1073
1074   return trace_event;
1075 }
1076
1077 void TraceLog::ThreadLocalEventBuffer::ReportOverhead(
1078     const TimeTicks& event_timestamp,
1079     const TimeTicks& event_thread_timestamp) {
1080   if (!g_category_group_enabled[g_category_trace_event_overhead])
1081     return;
1082
1083   CheckThisIsCurrentBuffer();
1084
1085   event_count_++;
1086   TimeTicks thread_now = ThreadNow();
1087   TimeTicks now = trace_log_->OffsetNow();
1088   TimeDelta overhead = now - event_timestamp;
1089   if (overhead.InMicroseconds() >= kOverheadReportThresholdInMicroseconds) {
1090     TraceEvent* trace_event = AddTraceEvent(NULL);
1091     if (trace_event) {
1092       trace_event->Initialize(
1093           static_cast<int>(PlatformThread::CurrentId()),
1094           event_timestamp, event_thread_timestamp,
1095           TRACE_EVENT_PHASE_COMPLETE,
1096           &g_category_group_enabled[g_category_trace_event_overhead],
1097           "overhead", 0, 0, NULL, NULL, NULL, NULL, 0);
1098       trace_event->UpdateDuration(now, thread_now);
1099     }
1100   }
1101   overhead_ += overhead;
1102 }
1103
1104 void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
1105   delete this;
1106 }
1107
1108 void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() {
1109   if (!chunk_)
1110     return;
1111
1112   trace_log_->lock_.AssertAcquired();
1113   if (trace_log_->CheckGeneration(generation_)) {
1114     // Return the chunk to the buffer only if the generation matches.
1115     trace_log_->logged_events_->ReturnChunk(chunk_index_, chunk_.Pass());
1116   }
1117   // Otherwise this method may be called from the destructor, or TraceLog will
1118   // find the generation mismatch and delete this buffer soon.
1119 }
1120
1121 // static
1122 TraceLog* TraceLog::GetInstance() {
1123   return Singleton<TraceLog, LeakySingletonTraits<TraceLog> >::get();
1124 }
1125
1126 TraceLog::TraceLog()
1127     : mode_(DISABLED),
1128       num_traces_recorded_(0),
1129       event_callback_(0),
1130       dispatching_to_observer_list_(false),
1131       process_sort_index_(0),
1132       process_id_hash_(0),
1133       process_id_(0),
1134       watch_category_(0),
1135       trace_options_(RECORD_UNTIL_FULL),
1136       sampling_thread_handle_(0),
1137       category_filter_(CategoryFilter::kDefaultCategoryFilterString),
1138       event_callback_category_filter_(
1139           CategoryFilter::kDefaultCategoryFilterString),
1140       thread_shared_chunk_index_(0),
1141       generation_(0) {
1142   // Trace is enabled or disabled on one thread while other threads are
1143   // accessing the enabled flag. We don't care whether edge-case events are
1144   // traced or not, so we allow races on the enabled flag to keep the trace
1145   // macros fast.
1146   // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
1147   // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
1148   //                            sizeof(g_category_group_enabled),
1149   //                           "trace_event category enabled");
1150   for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) {
1151     ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i],
1152                          "trace_event category enabled");
1153   }
1154 #if defined(OS_NACL)  // NaCl shouldn't expose the process id.
1155   SetProcessID(0);
1156 #else
1157   SetProcessID(static_cast<int>(GetCurrentProcId()));
1158
1159   // NaCl also shouldn't access the command line.
1160   if (CommandLine::InitializedForCurrentProcess() &&
1161       CommandLine::ForCurrentProcess()->HasSwitch(switches::kTraceToConsole)) {
1162     std::string filter = CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
1163         switches::kTraceToConsole);
1164     if (filter.empty()) {
1165       filter = kEchoToConsoleCategoryFilter;
1166     } else {
1167       filter.append(",");
1168       filter.append(kEchoToConsoleCategoryFilter);
1169     }
1170
1171     LOG(ERROR) << "Start " << switches::kTraceToConsole
1172                << " with CategoryFilter '" << filter << "'.";
1173     SetEnabled(CategoryFilter(filter), RECORDING_MODE, ECHO_TO_CONSOLE);
1174   }
1175 #endif
1176
1177   logged_events_.reset(CreateTraceBuffer());
1178 }
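// Illustrative example of the console-tracing setup above: launching with
// --trace-to-console=cc,gpu yields the category filter "cc,gpu,-ipc,-task",
// i.e. the user's categories plus the exclusions needed to avoid the
// ECHO_TO_CONSOLE deadlock noted near the top of this file.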
1179
1180 TraceLog::~TraceLog() {
1181 }
1182
1183 const unsigned char* TraceLog::GetCategoryGroupEnabled(
1184     const char* category_group) {
1185   TraceLog* tracelog = GetInstance();
1186   if (!tracelog) {
1187     DCHECK(!g_category_group_enabled[g_category_already_shutdown]);
1188     return &g_category_group_enabled[g_category_already_shutdown];
1189   }
1190   return tracelog->GetCategoryGroupEnabledInternal(category_group);
1191 }
1192
1193 const char* TraceLog::GetCategoryGroupName(
1194     const unsigned char* category_group_enabled) {
1195   // Calculate the index of the category group by finding
1196   // category_group_enabled in g_category_group_enabled array.
1197   uintptr_t category_begin =
1198       reinterpret_cast<uintptr_t>(g_category_group_enabled);
1199   uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled);
1200   DCHECK(category_ptr >= category_begin &&
1201          category_ptr < reinterpret_cast<uintptr_t>(
1202              g_category_group_enabled + MAX_CATEGORY_GROUPS)) <<
1203       "out of bounds category pointer";
1204   uintptr_t category_index =
1205       (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
1206   return g_category_groups[category_index];
1207 }
1208
1209 void TraceLog::UpdateCategoryGroupEnabledFlag(int category_index) {
1210   unsigned char enabled_flag = 0;
1211   const char* category_group = g_category_groups[category_index];
1212   if (mode_ == RECORDING_MODE &&
1213       category_filter_.IsCategoryGroupEnabled(category_group))
1214     enabled_flag |= ENABLED_FOR_RECORDING;
1215   else if (mode_ == MONITORING_MODE &&
1216       category_filter_.IsCategoryGroupEnabled(category_group))
1217     enabled_flag |= ENABLED_FOR_MONITORING;
1218   if (event_callback_ &&
1219       event_callback_category_filter_.IsCategoryGroupEnabled(category_group))
1220     enabled_flag |= ENABLED_FOR_EVENT_CALLBACK;
1221   g_category_group_enabled[category_index] = enabled_flag;
1222 }
1223
1224 void TraceLog::UpdateCategoryGroupEnabledFlags() {
1225   for (int i = 0; i < g_category_index; i++)
1226     UpdateCategoryGroupEnabledFlag(i);
1227 }
1228
1229 void TraceLog::UpdateSyntheticDelaysFromCategoryFilter() {
1230   ResetTraceEventSyntheticDelays();
1231   const CategoryFilter::StringList& delays =
1232       category_filter_.GetSyntheticDelayValues();
1233   CategoryFilter::StringList::const_iterator ci;
1234   for (ci = delays.begin(); ci != delays.end(); ++ci) {
1235     StringTokenizer tokens(*ci, ";");
1236     if (!tokens.GetNext())
1237       continue;
1238     TraceEventSyntheticDelay* delay =
1239         TraceEventSyntheticDelay::Lookup(tokens.token());
1240     while (tokens.GetNext()) {
1241       std::string token = tokens.token();
1242       char* duration_end;
1243       double target_duration = strtod(token.c_str(), &duration_end);
1244       if (duration_end != token.c_str()) {
1245         delay->SetTargetDuration(
1246             TimeDelta::FromMicroseconds(target_duration * 1e6));
1247       } else if (token == "static") {
1248         delay->SetMode(TraceEventSyntheticDelay::STATIC);
1249       } else if (token == "oneshot") {
1250         delay->SetMode(TraceEventSyntheticDelay::ONE_SHOT);
1251       } else if (token == "alternating") {
1252         delay->SetMode(TraceEventSyntheticDelay::ALTERNATING);
1253       }
1254     }
1255   }
1256 }
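// Illustrative example of the delay syntax parsed above (the delay name is
// made up): a filter entry such as "DELAY(cc.BeginMainFrame;0.016;alternating)"
// configures the synthetic delay "cc.BeginMainFrame" with a 16 ms target
// duration in alternating mode.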
1257
1258 const unsigned char* TraceLog::GetCategoryGroupEnabledInternal(
1259     const char* category_group) {
1260   DCHECK(!strchr(category_group, '"')) <<
1261       "Category groups may not contain double quote";
1262   AutoLock lock(lock_);
1263
1264   unsigned char* category_group_enabled = NULL;
1265   // Search for pre-existing category group.
1266   for (int i = 0; i < g_category_index; i++) {
1267     if (strcmp(g_category_groups[i], category_group) == 0) {
1268       category_group_enabled = &g_category_group_enabled[i];
1269       break;
1270     }
1271   }
1272
1273   if (!category_group_enabled) {
1274     // Create a new category group
1275     DCHECK(g_category_index < MAX_CATEGORY_GROUPS) <<
1276         "must increase MAX_CATEGORY_GROUPS";
1277     if (g_category_index < MAX_CATEGORY_GROUPS) {
1278       int new_index = g_category_index++;
1279       // Don't hold on to the category_group pointer, so that we can create
1280       // category groups with strings not known at compile time (this is
1281       // required by SetWatchEvent).
1282       const char* new_group = strdup(category_group);
1283       ANNOTATE_LEAKING_OBJECT_PTR(new_group);
1284       g_category_groups[new_index] = new_group;
1285       DCHECK(!g_category_group_enabled[new_index]);
1286       // Note that if both included and excluded patterns in the
1287       // CategoryFilter are empty, we exclude nothing,
1288       // thereby enabling this category group.
1289       UpdateCategoryGroupEnabledFlag(new_index);
1290       category_group_enabled = &g_category_group_enabled[new_index];
1291     } else {
1292       category_group_enabled =
1293           &g_category_group_enabled[g_category_categories_exhausted];
1294     }
1295   }
1296   return category_group_enabled;
1297 }
1298
1299 void TraceLog::GetKnownCategoryGroups(
1300     std::vector<std::string>* category_groups) {
1301   AutoLock lock(lock_);
1302   category_groups->push_back(
1303       g_category_groups[g_category_trace_event_overhead]);
1304   for (int i = g_num_builtin_categories; i < g_category_index; i++)
1305     category_groups->push_back(g_category_groups[i]);
1306 }
1307
1308 void TraceLog::SetEnabled(const CategoryFilter& category_filter,
1309                           Mode mode,
1310                           Options options) {
1311   std::vector<EnabledStateObserver*> observer_list;
1312   {
1313     AutoLock lock(lock_);
1314
1315     // Can't enable tracing when Flush() is in progress.
1316     DCHECK(!flush_message_loop_proxy_.get());
1317
1318     Options old_options = trace_options();
1319
1320     if (IsEnabled()) {
1321       if (options != old_options) {
1322         DLOG(ERROR) << "Attemting to re-enable tracing with a different "
1323                     << "set of options.";
1324       }
1325
1326       if (mode != mode_) {
1327         DLOG(ERROR) << "Attemting to re-enable tracing with a different mode.";
1328       }
1329
1330       category_filter_.Merge(category_filter);
1331       UpdateCategoryGroupEnabledFlags();
1332       return;
1333     }
1334
1335     if (dispatching_to_observer_list_) {
1336       DLOG(ERROR) <<
1337           "Cannot manipulate TraceLog::Enabled state from an observer.";
1338       return;
1339     }
1340
1341     mode_ = mode;
1342
1343     if (options != old_options) {
1344       subtle::NoBarrier_Store(&trace_options_, options);
1345       UseNextTraceBuffer();
1346     }
1347
1348     num_traces_recorded_++;
1349
1350     category_filter_ = CategoryFilter(category_filter);
1351     UpdateCategoryGroupEnabledFlags();
1352     UpdateSyntheticDelaysFromCategoryFilter();
1353
1354     if (options & ENABLE_SAMPLING) {
1355       sampling_thread_.reset(new TraceSamplingThread);
1356       sampling_thread_->RegisterSampleBucket(
1357           &g_trace_state[0],
1358           "bucket0",
1359           Bind(&TraceSamplingThread::DefaultSamplingCallback));
1360       sampling_thread_->RegisterSampleBucket(
1361           &g_trace_state[1],
1362           "bucket1",
1363           Bind(&TraceSamplingThread::DefaultSamplingCallback));
1364       sampling_thread_->RegisterSampleBucket(
1365           &g_trace_state[2],
1366           "bucket2",
1367           Bind(&TraceSamplingThread::DefaultSamplingCallback));
1368       if (!PlatformThread::Create(
1369             0, sampling_thread_.get(), &sampling_thread_handle_)) {
1370         DCHECK(false) << "failed to create thread";
1371       }
1372     }
1373
1374     dispatching_to_observer_list_ = true;
1375     observer_list = enabled_state_observer_list_;
1376   }
1377   // Notify observers outside the lock in case they trigger trace events.
1378   for (size_t i = 0; i < observer_list.size(); ++i)
1379     observer_list[i]->OnTraceLogEnabled();
1380
1381   {
1382     AutoLock lock(lock_);
1383     dispatching_to_observer_list_ = false;
1384   }
1385 }
1386
1387 CategoryFilter TraceLog::GetCurrentCategoryFilter() {
1388   AutoLock lock(lock_);
1389   return category_filter_;
1390 }
1391
1392 void TraceLog::SetDisabled() {
1393   AutoLock lock(lock_);
1394   SetDisabledWhileLocked();
1395 }
1396
1397 void TraceLog::SetDisabledWhileLocked() {
1398   lock_.AssertAcquired();
1399
1400   if (!IsEnabled())
1401     return;
1402
1403   if (dispatching_to_observer_list_) {
1404     DLOG(ERROR)
1405         << "Cannot manipulate TraceLog::Enabled state from an observer.";
1406     return;
1407   }
1408
1409   mode_ = DISABLED;
1410
1411   if (sampling_thread_.get()) {
1412     // Stop the sampling thread.
1413     sampling_thread_->Stop();
1414     lock_.Release();
1415     PlatformThread::Join(sampling_thread_handle_);
1416     lock_.Acquire();
1417     sampling_thread_handle_ = PlatformThreadHandle();
1418     sampling_thread_.reset();
1419   }
1420
1421   category_filter_.Clear();
1422   subtle::NoBarrier_Store(&watch_category_, 0);
1423   watch_event_name_ = "";
1424   UpdateCategoryGroupEnabledFlags();
1425   AddMetadataEventsWhileLocked();
1426
1427   dispatching_to_observer_list_ = true;
1428   std::vector<EnabledStateObserver*> observer_list =
1429       enabled_state_observer_list_;
1430
1431   {
1432     // Dispatch to observers outside the lock in case the observer triggers a
1433     // trace event.
1434     AutoUnlock unlock(lock_);
1435     for (size_t i = 0; i < observer_list.size(); ++i)
1436       observer_list[i]->OnTraceLogDisabled();
1437   }
1438   dispatching_to_observer_list_ = false;
1439 }
1440
1441 int TraceLog::GetNumTracesRecorded() {
1442   AutoLock lock(lock_);
1443   if (!IsEnabled())
1444     return -1;
1445   return num_traces_recorded_;
1446 }
1447
1448 void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) {
1449   enabled_state_observer_list_.push_back(listener);
1450 }
1451
1452 void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver* listener) {
1453   std::vector<EnabledStateObserver*>::iterator it =
1454       std::find(enabled_state_observer_list_.begin(),
1455                 enabled_state_observer_list_.end(),
1456                 listener);
1457   if (it != enabled_state_observer_list_.end())
1458     enabled_state_observer_list_.erase(it);
1459 }
1460
1461 bool TraceLog::HasEnabledStateObserver(EnabledStateObserver* listener) const {
1462   std::vector<EnabledStateObserver*>::const_iterator it =
1463       std::find(enabled_state_observer_list_.begin(),
1464                 enabled_state_observer_list_.end(),
1465                 listener);
1466   return it != enabled_state_observer_list_.end();
1467 }
1468
1469 float TraceLog::GetBufferPercentFull() const {
1470   AutoLock lock(lock_);
1471   return static_cast<float>(static_cast<double>(logged_events_->Size()) /
1472                             logged_events_->Capacity());
1473 }
1474
1475 bool TraceLog::BufferIsFull() const {
1476   AutoLock lock(lock_);
1477   return logged_events_->IsFull();
1478 }
1479
1480 TraceBuffer* TraceLog::CreateTraceBuffer() {
1481   Options options = trace_options();
1482   if (options & RECORD_CONTINUOUSLY)
1483     return new TraceBufferRingBuffer(kTraceEventRingBufferChunks);
1484   else if ((options & ENABLE_SAMPLING) && mode_ == MONITORING_MODE)
1485     return new TraceBufferRingBuffer(kMonitorTraceEventBufferChunks);
1486   else if (options & ECHO_TO_CONSOLE)
1487     return new TraceBufferRingBuffer(kEchoToConsoleTraceEventBufferChunks);
1488   return new TraceBufferVector();
1489 }
1490
1491 TraceEvent* TraceLog::AddEventToThreadSharedChunkWhileLocked(
1492     TraceEventHandle* handle, bool check_buffer_is_full) {
1493   lock_.AssertAcquired();
1494
1495   if (thread_shared_chunk_ && thread_shared_chunk_->IsFull()) {
1496     logged_events_->ReturnChunk(thread_shared_chunk_index_,
1497                                 thread_shared_chunk_.Pass());
1498   }
1499
1500   if (!thread_shared_chunk_) {
1501     thread_shared_chunk_ = logged_events_->GetChunk(
1502         &thread_shared_chunk_index_);
1503     if (check_buffer_is_full)
1504       CheckIfBufferIsFullWhileLocked();
1505   }
1506   if (!thread_shared_chunk_)
1507     return NULL;
1508
1509   size_t event_index;
1510   TraceEvent* trace_event = thread_shared_chunk_->AddTraceEvent(&event_index);
1511   if (trace_event && handle) {
1512     MakeHandle(thread_shared_chunk_->seq(), thread_shared_chunk_index_,
1513                event_index, handle);
1514   }
1515   return trace_event;
1516 }
1517
1518 void TraceLog::CheckIfBufferIsFullWhileLocked() {
1519   lock_.AssertAcquired();
1520   if (logged_events_->IsFull())
1521     SetDisabledWhileLocked();
1522 }
1523
1524 void TraceLog::SetEventCallbackEnabled(const CategoryFilter& category_filter,
1525                                        EventCallback cb) {
1526   AutoLock lock(lock_);
1527   subtle::NoBarrier_Store(&event_callback_,
1528                           reinterpret_cast<subtle::AtomicWord>(cb));
1529   event_callback_category_filter_ = category_filter;
1530   UpdateCategoryGroupEnabledFlags();
1531 }
1532
1533 void TraceLog::SetEventCallbackDisabled() {
1534   AutoLock lock(lock_);
1535   subtle::NoBarrier_Store(&event_callback_, 0);
1536   UpdateCategoryGroupEnabledFlags();
1537 }
1538
1539 // Flush() works as follows:
1540 // 1. Flush() is called in threadA whose message loop is saved in
1541 //    flush_message_loop_proxy_;
1542 // 2. If thread_message_loops_ is not empty, threadA posts task to each message
1543 //    loop to flush the thread local buffers; otherwise finish the flush;
1544 // 3. FlushCurrentThread() deletes the thread local event buffer:
1545 //    - The last batch of events of the thread are flushed into the main buffer;
1546 //    - The message loop will be removed from thread_message_loops_;
1547 //    If this is the last message loop, finish the flush;
1548 // 4. If any thread hasn't finish its flush in time, finish the flush.
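// Illustrative caller-side sketch (OnTraceDataCollected is a hypothetical
// callback matching OutputCallback's signature); tracing must be disabled
// before flushing:
//   TraceLog::GetInstance()->SetDisabled();
//   TraceLog::GetInstance()->Flush(Bind(&OnTraceDataCollected));
//   // OnTraceDataCollected(json_chunk, has_more_events) is run repeatedly
//   // until has_more_events is false.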
1549 void TraceLog::Flush(const TraceLog::OutputCallback& cb) {
1550   if (IsEnabled()) {
1551     // Can't flush when tracing is enabled because otherwise PostTask would
1552     // - generate more trace events;
1553     // - deschedule the calling thread on some platforms causing inaccurate
1554     //   timing of the trace events.
1555     scoped_refptr<RefCountedString> empty_result = new RefCountedString;
1556     if (!cb.is_null())
1557       cb.Run(empty_result, false);
1558     LOG(WARNING) << "Ignored TraceLog::Flush called when tracing is enabled";
1559     return;
1560   }
1561
1562   int generation = this->generation();
1563   {
1564     AutoLock lock(lock_);
1565     DCHECK(!flush_message_loop_proxy_.get());
1566     flush_message_loop_proxy_ = MessageLoopProxy::current();
1567     DCHECK(!thread_message_loops_.size() || flush_message_loop_proxy_.get());
1568     flush_output_callback_ = cb;
1569
1570     if (thread_shared_chunk_) {
1571       logged_events_->ReturnChunk(thread_shared_chunk_index_,
1572                                   thread_shared_chunk_.Pass());
1573     }
1574
1575     if (thread_message_loops_.size()) {
1576       for (hash_set<MessageLoop*>::const_iterator it =
1577            thread_message_loops_.begin();
1578            it != thread_message_loops_.end(); ++it) {
1579         (*it)->PostTask(
1580             FROM_HERE,
1581             Bind(&TraceLog::FlushCurrentThread, Unretained(this), generation));
1582       }
1583       flush_message_loop_proxy_->PostDelayedTask(
1584           FROM_HERE,
1585           Bind(&TraceLog::OnFlushTimeout, Unretained(this), generation),
1586           TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs));
1587       return;
1588     }
1589   }
1590
1591   FinishFlush(generation);
1592 }
1593
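// Serializes the events in |logged_events| to comma-separated JSON and streams
// them to |flush_output_callback|, at most kTraceEventBatchChunks chunks per
// callback invocation.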
1594 void TraceLog::ConvertTraceEventsToTraceFormat(
1595     scoped_ptr<TraceBuffer> logged_events,
1596     const TraceLog::OutputCallback& flush_output_callback) {
1597
1598   if (flush_output_callback.is_null())
1599     return;
1600
1601   // The callback needs to be called at least once even if there are no events,
1602   // to let the caller know that the flush has completed.
1603   bool has_more_events = true;
1604   do {
1605     scoped_refptr<RefCountedString> json_events_str_ptr =
1606         new RefCountedString();
1607
1608     for (size_t i = 0; i < kTraceEventBatchChunks; ++i) {
1609       const TraceBufferChunk* chunk = logged_events->NextChunk();
1610       if (!chunk) {
1611         has_more_events = false;
1612         break;
1613       }
1614       for (size_t j = 0; j < chunk->size(); ++j) {
1615         if (i > 0 || j > 0)
1616           json_events_str_ptr->data().append(",");
1617         chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data()));
1618       }
1619     }
1620
1621     flush_output_callback.Run(json_events_str_ptr, has_more_events);
1622   } while (has_more_events);
1623 }
1624
1625 void TraceLog::FinishFlush(int generation) {
1626   scoped_ptr<TraceBuffer> previous_logged_events;
1627   OutputCallback flush_output_callback;
1628
1629   if (!CheckGeneration(generation))
1630     return;
1631
1632   {
1633     AutoLock lock(lock_);
1634
1635     previous_logged_events.swap(logged_events_);
1636     UseNextTraceBuffer();
1637     thread_message_loops_.clear();
1638
1639     flush_message_loop_proxy_ = NULL;
1640     flush_output_callback = flush_output_callback_;
1641     flush_output_callback_.Reset();
1642   }
1643
1644   ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
1645                                   flush_output_callback);
1646 }
1647
1648 // Run in each thread holding a local event buffer.
1649 void TraceLog::FlushCurrentThread(int generation) {
1650   {
1651     AutoLock lock(lock_);
1652     if (!CheckGeneration(generation) || !flush_message_loop_proxy_) {
1653       // This is late. The corresponding flush has finished.
1654       return;
1655     }
1656   }
1657
1658   // This will flush the thread local buffer.
1659   delete thread_local_event_buffer_.Get();
1660
1661   AutoLock lock(lock_);
1662   if (!CheckGeneration(generation) || !flush_message_loop_proxy_ ||
1663       thread_message_loops_.size())
1664     return;
1665
1666   flush_message_loop_proxy_->PostTask(
1667       FROM_HERE,
1668       Bind(&TraceLog::FinishFlush, Unretained(this), generation));
1669 }
1670
1671 void TraceLog::OnFlushTimeout(int generation) {
1672   {
1673     AutoLock lock(lock_);
1674     if (!CheckGeneration(generation) || !flush_message_loop_proxy_) {
1675       // Flush has finished before timeout.
1676       return;
1677     }
1678
1679     LOG(WARNING) <<
1680         "The following threads haven't finished flushing in time. "
1681         "If this happens repeatedly for some thread, please call "
1682         "TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop() from "
1683         "that thread to avoid losing its trace events.";
1684     for (hash_set<MessageLoop*>::const_iterator it =
1685          thread_message_loops_.begin();
1686          it != thread_message_loops_.end(); ++it) {
1687       LOG(WARNING) << "Thread: " << (*it)->thread_name();
1688     }
1689   }
1690   FinishFlush(generation);
1691 }
1692
1693 void TraceLog::FlushButLeaveBufferIntact(
1694     const TraceLog::OutputCallback& flush_output_callback) {
1695   scoped_ptr<TraceBuffer> previous_logged_events;
1696   {
1697     AutoLock lock(lock_);
1698     AddMetadataEventsWhileLocked();
1699     if (thread_shared_chunk_) {
1700       // Return the chunk to the main buffer to flush the sampling data.
1701       logged_events_->ReturnChunk(thread_shared_chunk_index_,
1702                                   thread_shared_chunk_.Pass());
1703     }
1704     previous_logged_events = logged_events_->CloneForIteration().Pass();
1705   }  // release lock
1706
1707   ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
1708                                   flush_output_callback);
1709 }
1710
1711 void TraceLog::UseNextTraceBuffer() {
1712   logged_events_.reset(CreateTraceBuffer());
1713   subtle::NoBarrier_AtomicIncrement(&generation_, 1);
1714   thread_shared_chunk_.reset();
1715   thread_shared_chunk_index_ = 0;
1716 }
1717
1718 TraceEventHandle TraceLog::AddTraceEvent(
1719     char phase,
1720     const unsigned char* category_group_enabled,
1721     const char* name,
1722     unsigned long long id,
1723     int num_args,
1724     const char** arg_names,
1725     const unsigned char* arg_types,
1726     const unsigned long long* arg_values,
1727     const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
1728     unsigned char flags) {
1729   int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
1730   base::TimeTicks now = base::TimeTicks::NowFromSystemTraceTime();
1731   return AddTraceEventWithThreadIdAndTimestamp(phase, category_group_enabled,
1732                                                name, id, thread_id, now,
1733                                                num_args, arg_names,
1734                                                arg_types, arg_values,
1735                                                convertable_values, flags);
1736 }
1737
1738 TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
1739     char phase,
1740     const unsigned char* category_group_enabled,
1741     const char* name,
1742     unsigned long long id,
1743     int thread_id,
1744     const TimeTicks& timestamp,
1745     int num_args,
1746     const char** arg_names,
1747     const unsigned char* arg_types,
1748     const unsigned long long* arg_values,
1749     const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
1750     unsigned char flags) {
1751   TraceEventHandle handle = { 0, 0, 0 };
1752   if (!*category_group_enabled)
1753     return handle;
1754
1755   // Avoid re-entrance of AddTraceEvent. This may happen in the GPU process
1756   // when ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
1757   // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
1758   if (thread_is_in_trace_event_.Get())
1759     return handle;
1760
1761   AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);
1762
1763   DCHECK(name);
1764
1765   if (flags & TRACE_EVENT_FLAG_MANGLE_ID)
1766     id ^= process_id_hash_;
1767
1768   TimeTicks now = OffsetTimestamp(timestamp);
1769   TimeTicks thread_now = ThreadNow();
1770
1771   ThreadLocalEventBuffer* thread_local_event_buffer = NULL;
1772   // A ThreadLocalEventBuffer needs the message loop
1773   // - to know when the thread exits;
1774   // - to handle the final flush.
1775   // For a thread without a message loop, or one whose message loop may be
1776   // blocked, trace events will be added into the main buffer directly.
1777   if (!thread_blocks_message_loop_.Get() && MessageLoop::current()) {
1778     thread_local_event_buffer = thread_local_event_buffer_.Get();
1779     if (thread_local_event_buffer &&
1780         !CheckGeneration(thread_local_event_buffer->generation())) {
1781       delete thread_local_event_buffer;
1782       thread_local_event_buffer = NULL;
1783     }
1784     if (!thread_local_event_buffer) {
1785       thread_local_event_buffer = new ThreadLocalEventBuffer(this);
1786       thread_local_event_buffer_.Set(thread_local_event_buffer);
1787     }
1788   }
1789
1790   // Check and update the current thread name only if the event is for the
1791   // current thread to avoid locks in most cases.
1792   if (thread_id == static_cast<int>(PlatformThread::CurrentId())) {
1793     const char* new_name = ThreadIdNameManager::GetInstance()->
1794         GetName(thread_id);
1795     // Check if the thread name has been set or changed since the previous
1796     // call (if any), but don't bother if the new name is empty. Note this will
1797     // not detect a thread name change within the same char* buffer address: we
1798     // favor common case performance over corner case correctness.
1799     if (new_name != g_current_thread_name.Get().Get() &&
1800         new_name && *new_name) {
1801       g_current_thread_name.Get().Set(new_name);
1802
1803       AutoLock thread_info_lock(thread_info_lock_);
1804
1805       hash_map<int, std::string>::iterator existing_name =
1806           thread_names_.find(thread_id);
1807       if (existing_name == thread_names_.end()) {
1808         // This is a new thread id, and a new name.
1809         thread_names_[thread_id] = new_name;
1810       } else {
1811         // This is a thread id that we've seen before, but potentially with a
1812         // new name.
1813         std::vector<StringPiece> existing_names;
1814         Tokenize(existing_name->second, ",", &existing_names);
1815         bool found = std::find(existing_names.begin(),
1816                                existing_names.end(),
1817                                new_name) != existing_names.end();
1818         if (!found) {
1819           if (existing_names.size())
1820             existing_name->second.push_back(',');
1821           existing_name->second.append(new_name);
1822         }
1823       }
1824     }
1825   }
1826
1827   std::string console_message;
1828   if (*category_group_enabled &
1829       (ENABLED_FOR_RECORDING | ENABLED_FOR_MONITORING)) {
1830     OptionalAutoLock lock(lock_);
1831
1832     TraceEvent* trace_event = NULL;
1833     if (thread_local_event_buffer) {
1834       trace_event = thread_local_event_buffer->AddTraceEvent(&handle);
1835     } else {
1836       lock.EnsureAcquired();
1837       trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true);
1838     }
1839
1840     if (trace_event) {
1841       trace_event->Initialize(thread_id, now, thread_now, phase,
1842                               category_group_enabled, name, id,
1843                               num_args, arg_names, arg_types, arg_values,
1844                               convertable_values, flags);
1845
1846 #if defined(OS_ANDROID)
1847       trace_event->SendToATrace();
1848 #endif
1849     }
1850
1851     if (trace_options() & ECHO_TO_CONSOLE) {
1852       console_message = EventToConsoleMessage(
1853           phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
1854           timestamp, trace_event);
1855     }
1856   }
1857
1858   if (console_message.size())
1859     LOG(ERROR) << console_message;
1860
1861   if (reinterpret_cast<const unsigned char*>(subtle::NoBarrier_Load(
1862       &watch_category_)) == category_group_enabled) {
1863     bool event_name_matches;
1864     WatchEventCallback watch_event_callback_copy;
1865     {
1866       AutoLock lock(lock_);
1867       event_name_matches = watch_event_name_ == name;
1868       watch_event_callback_copy = watch_event_callback_;
1869     }
1870     if (event_name_matches) {
1871       if (!watch_event_callback_copy.is_null())
1872         watch_event_callback_copy.Run();
1873     }
1874   }
1875
1876   if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
1877     EventCallback event_callback = reinterpret_cast<EventCallback>(
1878         subtle::NoBarrier_Load(&event_callback_));
1879     if (event_callback) {
1880       event_callback(now,
1881                      phase == TRACE_EVENT_PHASE_COMPLETE ?
1882                          TRACE_EVENT_PHASE_BEGIN : phase,
1883                      category_group_enabled, name, id,
1884                      num_args, arg_names, arg_types, arg_values,
1885                      flags);
1886     }
1887   }
1888
1889   if (thread_local_event_buffer)
1890     thread_local_event_buffer->ReportOverhead(now, thread_now);
1891
1892   return handle;
1893 }
1894
1895 // May be called when a COMPLETE event ends and the unfinished event has been
1896 // recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL).
1897 std::string TraceLog::EventToConsoleMessage(unsigned char phase,
1898                                             const TimeTicks& timestamp,
1899                                             TraceEvent* trace_event) {
1900   AutoLock thread_info_lock(thread_info_lock_);
1901
1902   // The caller should translate TRACE_EVENT_PHASE_COMPLETE to
1903   // TRACE_EVENT_PHASE_BEGIN or TRACE_EVENT_PHASE_END.
1904   DCHECK(phase != TRACE_EVENT_PHASE_COMPLETE);
1905
1906   TimeDelta duration;
1907   int thread_id = trace_event ?
1908       trace_event->thread_id() : PlatformThread::CurrentId();
1909   if (phase == TRACE_EVENT_PHASE_END) {
1910     duration = timestamp - thread_event_start_times_[thread_id].top();
1911     thread_event_start_times_[thread_id].pop();
1912   }
1913
1914   std::string thread_name = thread_names_[thread_id];
1915   if (thread_colors_.find(thread_name) == thread_colors_.end())
1916     thread_colors_[thread_name] = (thread_colors_.size() % 6) + 1;
1917
1918   std::ostringstream log;
1919   log << base::StringPrintf("%s: \x1b[0;3%dm",
1920                             thread_name.c_str(),
1921                             thread_colors_[thread_name]);
1922
1923   size_t depth = 0;
1924   if (thread_event_start_times_.find(thread_id) !=
1925       thread_event_start_times_.end())
1926     depth = thread_event_start_times_[thread_id].size();
1927
1928   for (size_t i = 0; i < depth; ++i)
1929     log << "| ";
1930
1931   if (trace_event)
1932     trace_event->AppendPrettyPrinted(&log);
1933   if (phase == TRACE_EVENT_PHASE_END)
1934     log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());
1935
1936   log << "\x1b[0;m";
1937
1938   if (phase == TRACE_EVENT_PHASE_BEGIN)
1939     thread_event_start_times_[thread_id].push(timestamp);
1940
1941   return log.str();
1942 }
1943
1944 void TraceLog::AddTraceEventEtw(char phase,
1945                                 const char* name,
1946                                 const void* id,
1947                                 const char* extra) {
1948 #if defined(OS_WIN)
1949   TraceEventETWProvider::Trace(name, phase, id, extra);
1950 #endif
1951   INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
1952                            TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
1953 }
1954
1955 void TraceLog::AddTraceEventEtw(char phase,
1956                                 const char* name,
1957                                 const void* id,
1958                                 const std::string& extra) {
1960 #if defined(OS_WIN)
1961   TraceEventETWProvider::Trace(name, phase, id, extra);
1962 #endif
1963   INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
1964                            TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
1965 }
1966
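// Called when a TRACE_EVENT_PHASE_COMPLETE scope ends: looks up the event via
// |handle|, fills in its duration, and notifies any registered event callback
// with a TRACE_EVENT_PHASE_END phase.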
1967 void TraceLog::UpdateTraceEventDuration(
1968     const unsigned char* category_group_enabled,
1969     const char* name,
1970     TraceEventHandle handle) {
1971   // Avoid re-entrance of AddTraceEvent. This may happen in the GPU process
1972   // when ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
1973   // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
1974   if (thread_is_in_trace_event_.Get())
1975     return;
1976
1977   AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);
1978
1979   TimeTicks thread_now = ThreadNow();
1980   TimeTicks now = OffsetNow();
1981
1982   std::string console_message;
1983   if (*category_group_enabled & ENABLED_FOR_RECORDING) {
1984     OptionalAutoLock lock(lock_);
1985
1986     TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock);
1987     if (trace_event) {
1988       DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE);
1989       trace_event->UpdateDuration(now, thread_now);
1990 #if defined(OS_ANDROID)
1991       trace_event->SendToATrace();
1992 #endif
1993     }
1994
1995     if (trace_options() & ECHO_TO_CONSOLE) {
1996       console_message = EventToConsoleMessage(TRACE_EVENT_PHASE_END,
1997                                               now, trace_event);
1998     }
1999   }
2000
2001   if (console_message.size())
2002     LOG(ERROR) << console_message;
2003
2004   if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
2005     EventCallback event_callback = reinterpret_cast<EventCallback>(
2006         subtle::NoBarrier_Load(&event_callback_));
2007     if (event_callback) {
2008       event_callback(now, TRACE_EVENT_PHASE_END, category_group_enabled, name,
2009                      trace_event_internal::kNoEventId, 0, NULL, NULL, NULL,
2010                      TRACE_EVENT_FLAG_NONE);
2011     }
2012   }
2013 }
2014
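// Registers a callback that is run whenever an event with the given category
// and name is added (see the watch_category_ check in
// AddTraceEventWithThreadIdAndTimestamp).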
2015 void TraceLog::SetWatchEvent(const std::string& category_name,
2016                              const std::string& event_name,
2017                              const WatchEventCallback& callback) {
2018   const unsigned char* category = GetCategoryGroupEnabled(
2019       category_name.c_str());
2020   AutoLock lock(lock_);
2021   subtle::NoBarrier_Store(&watch_category_,
2022                           reinterpret_cast<subtle::AtomicWord>(category));
2023   watch_event_name_ = event_name;
2024   watch_event_callback_ = callback;
2025 }
2026
2027 void TraceLog::CancelWatchEvent() {
2028   AutoLock lock(lock_);
2029   subtle::NoBarrier_Store(&watch_category_, 0);
2030   watch_event_name_ = "";
2031   watch_event_callback_.Reset();
2032 }
2033
2034 void TraceLog::AddMetadataEventsWhileLocked() {
2035   lock_.AssertAcquired();
2036
2037   int current_thread_id = static_cast<int>(base::PlatformThread::CurrentId());
2038   if (process_sort_index_ != 0) {
2039     InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
2040                             current_thread_id,
2041                             "process_sort_index", "sort_index",
2042                             process_sort_index_);
2043   }
2044
2045   if (process_name_.size()) {
2046     InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
2047                             current_thread_id,
2048                             "process_name", "name",
2049                             process_name_);
2050   }
2051
2052   if (process_labels_.size() > 0) {
2053     std::vector<std::string> labels;
2054     for (base::hash_map<int, std::string>::iterator it = process_labels_.begin();
2055         it != process_labels_.end();
2056         it++) {
2057       labels.push_back(it->second);
2058     }
2059     InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
2060                             current_thread_id,
2061                             "process_labels", "labels",
2062                             JoinString(labels, ','));
2063   }
2064
2065   // Thread sort indices.
2066   for (hash_map<int, int>::iterator it = thread_sort_indices_.begin();
2067       it != thread_sort_indices_.end();
2068       it++) {
2069     if (it->second == 0)
2070       continue;
2071     InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
2072                             it->first,
2073                             "thread_sort_index", "sort_index",
2074                             it->second);
2075   }
2076
2077   // Thread names.
2078   AutoLock thread_info_lock(thread_info_lock_);
2079   for (hash_map<int, std::string>::iterator it = thread_names_.begin();
2080       it != thread_names_.end();
2081       it++) {
2082     if (it->second.empty())
2083       continue;
2084     InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
2085                             it->first,
2086                             "thread_name", "name",
2087                             it->second);
2088   }
2089 }
2090
2091 void TraceLog::WaitSamplingEventForTesting() {
2092   if (!sampling_thread_)
2093     return;
2094   sampling_thread_->WaitSamplingEventForTesting();
2095 }
2096
2097 void TraceLog::DeleteForTesting() {
2098   DeleteTraceLogForTesting::Delete();
2099 }
2100
2101 TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
2102   return GetEventByHandleInternal(handle, NULL);
2103 }
2104
2105 TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle,
2106                                                OptionalAutoLock* lock) {
2107   if (!handle.chunk_seq)
2108     return NULL;
2109
2110   if (thread_local_event_buffer_.Get()) {
2111     TraceEvent* trace_event =
2112         thread_local_event_buffer_.Get()->GetEventByHandle(handle);
2113     if (trace_event)
2114       return trace_event;
2115   }
2116
2117   // The event is no longer under the control of the thread local buffer.
2118   // Try to get the event from the main buffer with a lock.
2119   if (lock)
2120     lock->EnsureAcquired();
2121
2122   if (thread_shared_chunk_ &&
2123       handle.chunk_index == thread_shared_chunk_index_) {
2124     return handle.chunk_seq == thread_shared_chunk_->seq() ?
2125         thread_shared_chunk_->GetEventAt(handle.event_index) : NULL;
2126   }
2127
2128   return logged_events_->GetEventByHandle(handle);
2129 }
2130
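// The process_id_hash_ computed here is XORed into event ids that carry
// TRACE_EVENT_FLAG_MANGLE_ID (see AddTraceEventWithThreadIdAndTimestamp), so
// that ids generated in different processes are unlikely to collide.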
2131 void TraceLog::SetProcessID(int process_id) {
2132   process_id_ = process_id;
2133   // Create a FNV hash from the process ID for XORing.
2134   // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
2135   unsigned long long offset_basis = 14695981039346656037ull;
2136   unsigned long long fnv_prime = 1099511628211ull;
2137   unsigned long long pid = static_cast<unsigned long long>(process_id_);
2138   process_id_hash_ = (offset_basis ^ pid) * fnv_prime;
2139 }
2140
2141 void TraceLog::SetProcessSortIndex(int sort_index) {
2142   AutoLock lock(lock_);
2143   process_sort_index_ = sort_index;
2144 }
2145
2146 void TraceLog::SetProcessName(const std::string& process_name) {
2147   AutoLock lock(lock_);
2148   process_name_ = process_name;
2149 }
2150
2151 void TraceLog::UpdateProcessLabel(
2152     int label_id, const std::string& current_label) {
2153   if (!current_label.length())
2154     return RemoveProcessLabel(label_id);
2155
2156   AutoLock lock(lock_);
2157   process_labels_[label_id] = current_label;
2158 }
2159
2160 void TraceLog::RemoveProcessLabel(int label_id) {
2161   AutoLock lock(lock_);
2162   base::hash_map<int, std::string>::iterator it = process_labels_.find(
2163       label_id);
2164   if (it == process_labels_.end())
2165     return;
2166
2167   process_labels_.erase(it);
2168 }
2169
2170 void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) {
2171   AutoLock lock(lock_);
2172   thread_sort_indices_[static_cast<int>(thread_id)] = sort_index;
2173 }
2174
2175 void TraceLog::SetTimeOffset(TimeDelta offset) {
2176   time_offset_ = offset;
2177 }
2178
2179 size_t TraceLog::GetObserverCountForTest() const {
2180   return enabled_state_observer_list_.size();
2181 }
2182
2183 void TraceLog::SetCurrentThreadBlocksMessageLoop() {
2184   thread_blocks_message_loop_.Set(true);
2185   if (thread_local_event_buffer_.Get()) {
2186     // This will flush the thread local buffer.
2187     delete thread_local_event_buffer_.Get();
2188   }
2189 }
2190
2191 bool CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
2192     const std::string& str) {
2193   return str.empty() ||
2194          str.at(0) == ' ' ||
2195          str.at(str.length() - 1) == ' ';
2196 }
2197
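// Returns true if any comma-separated token of |category_group| matches
// |category|, which is treated as a pattern and may contain '*'/'?' wildcards.
// For example (illustrative), the group "cc,disabled-by-default-cc.debug"
// contains the category "cc".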
2198 bool CategoryFilter::DoesCategoryGroupContainCategory(
2199     const char* category_group,
2200     const char* category) const {
2201   DCHECK(category);
2202   CStringTokenizer category_group_tokens(category_group,
2203                           category_group + strlen(category_group), ",");
2204   while (category_group_tokens.GetNext()) {
2205     std::string category_group_token = category_group_tokens.token();
2206     // Don't allow empty tokens, nor tokens with leading or trailing space.
2207     DCHECK(!CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
2208         category_group_token))
2209         << "Disallowed category string";
2210     if (MatchPattern(category_group_token.c_str(), category))
2211       return true;
2212   }
2213   return false;
2214 }
2215
2216 CategoryFilter::CategoryFilter(const std::string& filter_string) {
2217   if (!filter_string.empty())
2218     Initialize(filter_string);
2219   else
2220     Initialize(CategoryFilter::kDefaultCategoryFilterString);
2221 }
2222
2223 CategoryFilter::CategoryFilter(const CategoryFilter& cf)
2224     : included_(cf.included_),
2225       disabled_(cf.disabled_),
2226       excluded_(cf.excluded_),
2227       delays_(cf.delays_) {
2228 }
2229
2230 CategoryFilter::~CategoryFilter() {
2231 }
2232
2233 CategoryFilter& CategoryFilter::operator=(const CategoryFilter& rhs) {
2234   if (this == &rhs)
2235     return *this;
2236
2237   included_ = rhs.included_;
2238   disabled_ = rhs.disabled_;
2239   excluded_ = rhs.excluded_;
2240   delays_ = rhs.delays_;
2241   return *this;
2242 }
2243
2244 void CategoryFilter::Initialize(const std::string& filter_string) {
2245   // Tokenize list of categories, delimited by ','.
2246   StringTokenizer tokens(filter_string, ",");
2247   // Add each token to the appropriate list (included_,excluded_).
2248   while (tokens.GetNext()) {
2249     std::string category = tokens.token();
2250     // Ignore empty categories.
2251     if (category.empty())
2252       continue;
2253     // Synthetic delays are of the form 'DELAY(delay;option;option;...)'.
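    // An illustrative (hypothetical) entry: "DELAY(gpu.PresentingFrame;16)",
    // i.e. the delay name followed by ';'-separated options.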
2254     if (category.find(kSyntheticDelayCategoryFilterPrefix) == 0 &&
2255         category.at(category.size() - 1) == ')') {
2256       category = category.substr(
2257           strlen(kSyntheticDelayCategoryFilterPrefix),
2258           category.size() - strlen(kSyntheticDelayCategoryFilterPrefix) - 1);
2259       size_t name_length = category.find(';');
2260       if (name_length != std::string::npos && name_length > 0 &&
2261           name_length != category.size() - 1) {
2262         delays_.push_back(category);
2263       }
2264     } else if (category.at(0) == '-') {
2265       // Excluded categories start with '-'.
2266       // Remove '-' from category string.
2267       category = category.substr(1);
2268       excluded_.push_back(category);
2269     } else if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
2270                                 TRACE_DISABLED_BY_DEFAULT("")) == 0) {
2271       disabled_.push_back(category);
2272     } else {
2273       included_.push_back(category);
2274     }
2275   }
2276 }
2277
2278 void CategoryFilter::WriteString(const StringList& values,
2279                                  std::string* out,
2280                                  bool included) const {
2281   bool prepend_comma = !out->empty();
2282   int token_cnt = 0;
2283   for (StringList::const_iterator ci = values.begin();
2284        ci != values.end(); ++ci) {
2285     if (token_cnt > 0 || prepend_comma)
2286       StringAppendF(out, ",");
2287     StringAppendF(out, "%s%s", (included ? "" : "-"), ci->c_str());
2288     ++token_cnt;
2289   }
2290 }
2291
2292 void CategoryFilter::WriteString(const StringList& delays,
2293                                  std::string* out) const {
2294   bool prepend_comma = !out->empty();
2295   int token_cnt = 0;
2296   for (StringList::const_iterator ci = delays.begin();
2297        ci != delays.end(); ++ci) {
2298     if (token_cnt > 0 || prepend_comma)
2299       StringAppendF(out, ",");
2300     StringAppendF(out, "%s%s)", kSyntheticDelayCategoryFilterPrefix,
2301                   ci->c_str());
2302     ++token_cnt;
2303   }
2304 }
2305
2306 std::string CategoryFilter::ToString() const {
2307   std::string filter_string;
2308   WriteString(included_, &filter_string, true);
2309   WriteString(disabled_, &filter_string, true);
2310   WriteString(excluded_, &filter_string, false);
2311   WriteString(delays_, &filter_string);
2312   return filter_string;
2313 }
2314
2315 bool CategoryFilter::IsCategoryGroupEnabled(
2316     const char* category_group_name) const {
2317   // TraceLog should call this method only as part of enabling/disabling
2318   // categories.
2319   StringList::const_iterator ci;
2320
2321   // Check the disabled- filters and the disabled-* wildcard first so that a
2322   // "*" filter does not include the disabled categories.
2323   for (ci = disabled_.begin(); ci != disabled_.end(); ++ci) {
2324     if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
2325       return true;
2326   }
2327   if (DoesCategoryGroupContainCategory(category_group_name,
2328                                        TRACE_DISABLED_BY_DEFAULT("*")))
2329     return false;
2330
2331   for (ci = included_.begin(); ci != included_.end(); ++ci) {
2332     if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
2333       return true;
2334   }
2335
2336   for (ci = excluded_.begin(); ci != excluded_.end(); ++ci) {
2337     if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
2338       return false;
2339   }
2340   // If the category group is not excluded and there are no included patterns,
2341   // we consider this category group enabled.
2342   return included_.empty();
2343 }
2344
2345 bool CategoryFilter::HasIncludedPatterns() const {
2346   return !included_.empty();
2347 }
2348
2349 void CategoryFilter::Merge(const CategoryFilter& nested_filter) {
2350   // Keep included patterns only if both filters have an included entry.
2351   // Otherwise, one of the filters was specifying "*" and we want to honor the
2352   // broadest filter.
2353   if (HasIncludedPatterns() && nested_filter.HasIncludedPatterns()) {
2354     included_.insert(included_.end(),
2355                      nested_filter.included_.begin(),
2356                      nested_filter.included_.end());
2357   } else {
2358     included_.clear();
2359   }
2360
2361   disabled_.insert(disabled_.end(),
2362                    nested_filter.disabled_.begin(),
2363                    nested_filter.disabled_.end());
2364   excluded_.insert(excluded_.end(),
2365                    nested_filter.excluded_.begin(),
2366                    nested_filter.excluded_.end());
2367   delays_.insert(delays_.end(),
2368                  nested_filter.delays_.begin(),
2369                  nested_filter.delays_.end());
2370 }
2371
2372 void CategoryFilter::Clear() {
2373   included_.clear();
2374   disabled_.clear();
2375   excluded_.clear();
2376 }
2377
2378 const CategoryFilter::StringList&
2379     CategoryFilter::GetSyntheticDelayValues() const {
2380   return delays_;
2381 }
2382
2383 }  // namespace debug
2384 }  // namespace base
2385
2386 namespace trace_event_internal {
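
// ScopedTraceBinaryEfficient emits a single TRACE_EVENT_PHASE_COMPLETE event:
// the constructor adds the event with the current thread id and timestamp, and
// the destructor fills in its duration via
// TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION.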
2387
2388 ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient(
2389     const char* category_group, const char* name) {
2390   // The single atom works because for now the category_group can only be "gpu".
2391   DCHECK(strcmp(category_group, "gpu") == 0);
2392   static TRACE_EVENT_API_ATOMIC_WORD atomic = 0;
2393   INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(
2394       category_group, atomic, category_group_enabled_);
2395   name_ = name;
2396   if (*category_group_enabled_) {
2397     event_handle_ =
2398         TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
2399             TRACE_EVENT_PHASE_COMPLETE, category_group_enabled_, name,
2400             trace_event_internal::kNoEventId,
2401             static_cast<int>(base::PlatformThread::CurrentId()),
2402             base::TimeTicks::NowFromSystemTraceTime(),
2403             0, NULL, NULL, NULL, NULL, TRACE_EVENT_FLAG_NONE);
2404   }
2405 }
2406
2407 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
2408   if (*category_group_enabled_) {
2409     TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_,
2410                                                 name_, event_handle_);
2411   }
2412 }
2413
2414 }  // namespace trace_event_internal