// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/debug/trace_event_impl.h"

#include <algorithm>

#include "base/base_switches.h"
#include "base/bind.h"
#include "base/command_line.h"
#include "base/debug/leak_annotations.h"
#include "base/debug/trace_event.h"
#include "base/debug/trace_event_synthetic_delay.h"
#include "base/float_util.h"
#include "base/format_macros.h"
#include "base/json/string_escape.h"
#include "base/lazy_instance.h"
#include "base/memory/singleton.h"
#include "base/message_loop/message_loop.h"
#include "base/process/process_metrics.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/string_tokenizer.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "base/synchronization/cancellation_flag.h"
#include "base/synchronization/waitable_event.h"
#include "base/sys_info.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_id_name_manager.h"
#include "base/time/time.h"

#if defined(OS_WIN)
#include "base/debug/trace_event_win.h"
#endif

class DeleteTraceLogForTesting {
 public:
  static void Delete() {
    Singleton<base::debug::TraceLog,
              LeakySingletonTraits<base::debug::TraceLog> >::OnExit(0);
  }
};

// The thread buckets for the sampling profiler.
BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];

namespace base {
namespace debug {

namespace {

// The overhead of TraceEvent above this threshold will be reported in the
// trace.
const int kOverheadReportThresholdInMicroseconds = 50;

// Controls the number of trace events we will buffer in-memory
// before throwing them away.
const size_t kTraceBufferChunkSize = TraceBufferChunk::kTraceBufferChunkSize;
const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize;
const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4;
const size_t kTraceEventBatchChunks = 1000 / kTraceBufferChunkSize;
// Can store results for 30 seconds with 1 ms sampling interval.
const size_t kMonitorTraceEventBufferChunks = 30000 / kTraceBufferChunkSize;
// ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
const size_t kEchoToConsoleTraceEventBufferChunks = 256;
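
// Sizing note (a sketch, assuming kTraceBufferChunkSize is 64 as declared in
// trace_event_impl.h): the vector buffer then holds 256000 / 64 = 4000 chunks
// (about 256K events), the ring buffer a quarter of that (about 64K events),
// and the monitoring buffer 30000 / 64 = 468 chunks, which covers roughly
// 30 seconds of samples at the 1 ms sampling interval mentioned above.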

const int kThreadFlushTimeoutMs = 3000;

#if !defined(OS_NACL)
// These categories will cause deadlock when ECHO_TO_CONSOLE. crbug.com/325575.
const char kEchoToConsoleCategoryFilter[] = "-ipc,-task";
#endif

const char kSyntheticDelayCategoryFilterPrefix[] = "DELAY(";

#define MAX_CATEGORY_GROUPS 100

// Parallel arrays g_category_groups and g_category_group_enabled are separate
// so that a pointer to a member of g_category_group_enabled can be easily
// converted to an index into g_category_groups. This allows macros to deal
// only with char enabled pointers from g_category_group_enabled, and we can
// convert internally to determine the category name from the char enabled
// pointer.
const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
  "toplevel",
  "tracing already shutdown",
  "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
  "__metadata",
  // For reporting trace_event overhead. For thread local event buffers only.
  "trace_event_overhead"};

// The enabled flag is char instead of bool so that the API can be used from C.
unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = { 0 };
// Indexes here have to match the g_category_groups array indexes above.
const int g_category_already_shutdown = 1;
const int g_category_categories_exhausted = 2;
const int g_category_metadata = 3;
const int g_category_trace_event_overhead = 4;
const int g_num_builtin_categories = 5;
// Skip default categories.
base::subtle::AtomicWord g_category_index = g_num_builtin_categories;

// The name of the current thread. This is used to decide if the current
// thread name has changed. We combine all the seen thread names into the
// output name for the thread.
LazyInstance<ThreadLocalPointer<const char> >::Leaky
    g_current_thread_name = LAZY_INSTANCE_INITIALIZER;

TimeTicks ThreadNow() {
  return TimeTicks::IsThreadNowSupported() ?
      TimeTicks::ThreadNow() : TimeTicks();
}

class TraceBufferRingBuffer : public TraceBuffer {
 public:
  TraceBufferRingBuffer(size_t max_chunks)
      : max_chunks_(max_chunks),
        recyclable_chunks_queue_(new size_t[queue_capacity()]),
        queue_head_(0),
        queue_tail_(max_chunks),
        current_iteration_index_(0),
        current_chunk_seq_(1) {
    chunks_.reserve(max_chunks);
    for (size_t i = 0; i < max_chunks; ++i)
      recyclable_chunks_queue_[i] = i;
  }

  virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
    // Because the number of threads is much less than the number of chunks,
    // the queue should never be empty.
    DCHECK(!QueueIsEmpty());

    *index = recyclable_chunks_queue_[queue_head_];
    queue_head_ = NextQueueIndex(queue_head_);
    current_iteration_index_ = queue_head_;

    if (*index >= chunks_.size())
      chunks_.resize(*index + 1);

    TraceBufferChunk* chunk = chunks_[*index];
    chunks_[*index] = NULL;  // Put NULL in the slot of an in-flight chunk.
    if (chunk)
      chunk->Reset(current_chunk_seq_++);
    else
      chunk = new TraceBufferChunk(current_chunk_seq_++);

    return scoped_ptr<TraceBufferChunk>(chunk);
  }

  virtual void ReturnChunk(size_t index,
                           scoped_ptr<TraceBufferChunk> chunk) OVERRIDE {
    // When this method is called, the queue should not be full because it
    // can contain all chunks including the one to be returned.
    DCHECK(!QueueIsFull());
    DCHECK(chunk);
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    chunks_[index] = chunk.release();
    recyclable_chunks_queue_[queue_tail_] = index;
    queue_tail_ = NextQueueIndex(queue_tail_);
  }

  virtual bool IsFull() const OVERRIDE {
    return false;
  }

  virtual size_t Size() const OVERRIDE {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * kTraceBufferChunkSize;
  }

  virtual size_t Capacity() const OVERRIDE {
    return max_chunks_ * kTraceBufferChunkSize;
  }

  virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
    if (handle.chunk_index >= chunks_.size())
      return NULL;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index];
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return NULL;
    return chunk->GetEventAt(handle.event_index);
  }

  virtual const TraceBufferChunk* NextChunk() OVERRIDE {
    if (chunks_.empty())
      return NULL;

    while (current_iteration_index_ != queue_tail_) {
      size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_];
      current_iteration_index_ = NextQueueIndex(current_iteration_index_);
      if (chunk_index >= chunks_.size()) // Skip uninitialized chunks.
        continue;
      DCHECK(chunks_[chunk_index]);
      return chunks_[chunk_index];
    }
    return NULL;
  }

  virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
    scoped_ptr<ClonedTraceBuffer> cloned_buffer(new ClonedTraceBuffer());
    for (size_t queue_index = queue_head_; queue_index != queue_tail_;
        queue_index = NextQueueIndex(queue_index)) {
      size_t chunk_index = recyclable_chunks_queue_[queue_index];
      if (chunk_index >= chunks_.size()) // Skip uninitialized chunks.
        continue;
      TraceBufferChunk* chunk = chunks_[chunk_index];
      cloned_buffer->chunks_.push_back(chunk ? chunk->Clone().release() : NULL);
    }
    return cloned_buffer.PassAs<TraceBuffer>();
  }

 private:
  class ClonedTraceBuffer : public TraceBuffer {
   public:
    ClonedTraceBuffer() : current_iteration_index_(0) {}

    // The only implemented method.
    virtual const TraceBufferChunk* NextChunk() OVERRIDE {
      return current_iteration_index_ < chunks_.size() ?
          chunks_[current_iteration_index_++] : NULL;
    }

    virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
      NOTIMPLEMENTED();
      return scoped_ptr<TraceBufferChunk>();
    }
    virtual void ReturnChunk(size_t index,
                             scoped_ptr<TraceBufferChunk>) OVERRIDE {
      NOTIMPLEMENTED();
    }
    virtual bool IsFull() const OVERRIDE { return false; }
    virtual size_t Size() const OVERRIDE { return 0; }
    virtual size_t Capacity() const OVERRIDE { return 0; }
    virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
      return NULL;
    }
    virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
      NOTIMPLEMENTED();
      return scoped_ptr<TraceBuffer>();
    }

    size_t current_iteration_index_;
    ScopedVector<TraceBufferChunk> chunks_;
  };

  bool QueueIsEmpty() const {
    return queue_head_ == queue_tail_;
  }

  size_t QueueSize() const {
    return queue_tail_ > queue_head_ ? queue_tail_ - queue_head_ :
        queue_tail_ + queue_capacity() - queue_head_;
  }

  bool QueueIsFull() const {
    return QueueSize() == queue_capacity() - 1;
  }

  size_t queue_capacity() const {
    // One extra space to help distinguish full state and empty state.
    return max_chunks_ + 1;
  }

  size_t NextQueueIndex(size_t index) const {
    index++;
    if (index >= queue_capacity())
      index = 0;
    return index;
  }

  size_t max_chunks_;
  ScopedVector<TraceBufferChunk> chunks_;

  scoped_ptr<size_t[]> recyclable_chunks_queue_;
  size_t queue_head_;
  size_t queue_tail_;

  size_t current_iteration_index_;
  uint32 current_chunk_seq_;

  DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer);
};
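
// Worked example of the recyclable-chunks queue above (a sketch using a
// hypothetical max_chunks_ of 4, so queue_capacity() is 5): the queue starts
// as {0, 1, 2, 3} with queue_head_ == 0 and queue_tail_ == 4. GetChunk() pops
// index 0 and advances queue_head_ to 1; ReturnChunk(0) writes 0 at slot 4
// and wraps queue_tail_ to 0. The one spare slot is what lets QueueIsFull()
// (size == capacity - 1) be distinguished from QueueIsEmpty() (head == tail).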

class TraceBufferVector : public TraceBuffer {
 public:
  TraceBufferVector()
      : in_flight_chunk_count_(0),
        current_iteration_index_(0) {
    chunks_.reserve(kTraceEventVectorBufferChunks);
  }

  virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
    // This function may be called when adding normal events or indirectly from
    // AddMetadataEventsWhileLocked(). We cannot DCHECK(!IsFull()) because we
    // have to add the metadata events and flush thread-local buffers even if
    // the buffer is full.
    *index = chunks_.size();
    chunks_.push_back(NULL);  // Put NULL in the slot of an in-flight chunk.
    ++in_flight_chunk_count_;
    // + 1 because zero chunk_seq is not allowed.
    return scoped_ptr<TraceBufferChunk>(
        new TraceBufferChunk(static_cast<uint32>(*index) + 1));
  }

  virtual void ReturnChunk(size_t index,
                           scoped_ptr<TraceBufferChunk> chunk) OVERRIDE {
    DCHECK_GT(in_flight_chunk_count_, 0u);
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    --in_flight_chunk_count_;
    chunks_[index] = chunk.release();
  }

  virtual bool IsFull() const OVERRIDE {
    return chunks_.size() >= kTraceEventVectorBufferChunks;
  }

  virtual size_t Size() const OVERRIDE {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * kTraceBufferChunkSize;
  }

  virtual size_t Capacity() const OVERRIDE {
    return kTraceEventVectorBufferChunks * kTraceBufferChunkSize;
  }

  virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
    if (handle.chunk_index >= chunks_.size())
      return NULL;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index];
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return NULL;
    return chunk->GetEventAt(handle.event_index);
  }

  virtual const TraceBufferChunk* NextChunk() OVERRIDE {
    while (current_iteration_index_ < chunks_.size()) {
      // Skip in-flight chunks.
      const TraceBufferChunk* chunk = chunks_[current_iteration_index_++];
      if (chunk)
        return chunk;
    }
    return NULL;
  }

  virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
    NOTIMPLEMENTED();
    return scoped_ptr<TraceBuffer>();
  }

 private:
  size_t in_flight_chunk_count_;
  size_t current_iteration_index_;
  ScopedVector<TraceBufferChunk> chunks_;

  DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
};

template <typename T>
void InitializeMetadataEvent(TraceEvent* trace_event,
                             int thread_id,
                             const char* metadata_name, const char* arg_name,
                             const T& value) {
  if (!trace_event)
    return;

  int num_args = 1;
  unsigned char arg_type;
  unsigned long long arg_value;
  ::trace_event_internal::SetTraceValue(value, &arg_type, &arg_value);
  trace_event->Initialize(thread_id,
                          TimeTicks(), TimeTicks(), TRACE_EVENT_PHASE_METADATA,
                          &g_category_group_enabled[g_category_metadata],
                          metadata_name, ::trace_event_internal::kNoEventId,
                          num_args, &arg_name, &arg_type, &arg_value, NULL,
                          TRACE_EVENT_FLAG_NONE);
}
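
// Example use of InitializeMetadataEvent (a sketch; the thread id and values
// are hypothetical): a "thread_name" metadata event for thread 42 would be
// written as
//   InitializeMetadataEvent(trace_event, 42, "thread_name", "name", "MyThread");
// The single argument is packed via SetTraceValue(), so any type the trace
// macros accept (ints, doubles, strings, ...) works as |value|.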

class AutoThreadLocalBoolean {
 public:
  explicit AutoThreadLocalBoolean(ThreadLocalBoolean* thread_local_boolean)
      : thread_local_boolean_(thread_local_boolean) {
    DCHECK(!thread_local_boolean_->Get());
    thread_local_boolean_->Set(true);
  }
  ~AutoThreadLocalBoolean() {
    thread_local_boolean_->Set(false);
  }

 private:
  ThreadLocalBoolean* thread_local_boolean_;
  DISALLOW_COPY_AND_ASSIGN(AutoThreadLocalBoolean);
};

}  // namespace

void TraceBufferChunk::Reset(uint32 new_seq) {
  for (size_t i = 0; i < next_free_; ++i)
    chunk_[i].Reset();
  next_free_ = 0;
  seq_ = new_seq;
}

TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) {
  DCHECK(!IsFull());
  *event_index = next_free_++;
  return &chunk_[*event_index];
}

scoped_ptr<TraceBufferChunk> TraceBufferChunk::Clone() const {
  scoped_ptr<TraceBufferChunk> cloned_chunk(new TraceBufferChunk(seq_));
  cloned_chunk->next_free_ = next_free_;
  for (size_t i = 0; i < next_free_; ++i)
    cloned_chunk->chunk_[i].CopyFrom(chunk_[i]);
  return cloned_chunk.Pass();
}

// A helper class that allows the lock to be acquired in the middle of the scope
// and unlocks at the end of scope if locked.
class TraceLog::OptionalAutoLock {
 public:
  explicit OptionalAutoLock(Lock& lock)
      : lock_(lock),
        locked_(false) {
  }

  ~OptionalAutoLock() {
    if (locked_)
      lock_.Release();
  }

  void EnsureAcquired() {
    if (!locked_) {
      lock_.Acquire();
      locked_ = true;
    }
  }

 private:
  Lock& lock_;
  bool locked_;
  DISALLOW_COPY_AND_ASSIGN(OptionalAutoLock);
};
// Use this function instead of the TraceEventHandle constructor to keep the
// overhead of the ScopedTracer (trace_event.h) constructor to a minimum.
void MakeHandle(uint32 chunk_seq, size_t chunk_index, size_t event_index,
                TraceEventHandle* handle) {
  DCHECK(chunk_seq);
  DCHECK(chunk_index < (1u << 16));
  DCHECK(event_index < (1u << 16));
  handle->chunk_seq = chunk_seq;
  handle->chunk_index = static_cast<uint16>(chunk_index);
  handle->event_index = static_cast<uint16>(event_index);
}
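
// Example (a sketch): MakeHandle(42, 3, 17, &handle) produces
// {chunk_seq: 42, chunk_index: 3, event_index: 17}. GetEventByHandle() later
// verifies that chunks_[3] still has seq() == 42 before dereferencing, so a
// stale handle into a recycled chunk resolves to NULL instead of an unrelated
// event.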

////////////////////////////////////////////////////////////////////////////////
//
// TraceEvent
//
////////////////////////////////////////////////////////////////////////////////

namespace {

size_t GetAllocLength(const char* str) { return str ? strlen(str) + 1 : 0; }

// Copies |*member| into |*buffer|, sets |*member| to point to this new
// location, and then advances |*buffer| by the amount written.
void CopyTraceEventParameter(char** buffer,
                             const char** member,
                             const char* end) {
  if (*member) {
    size_t written = strlcpy(*buffer, *member, end - *buffer) + 1;
    DCHECK_LE(static_cast<int>(written), end - *buffer);
    *member = *buffer;
    *buffer += written;
  }
}

}  // namespace

TraceEvent::TraceEvent()
    : duration_(TimeDelta::FromInternalValue(-1)),
      id_(0u),
      category_group_enabled_(NULL),
      name_(NULL),
      thread_id_(0),
      phase_(TRACE_EVENT_PHASE_BEGIN),
      flags_(0) {
  for (int i = 0; i < kTraceMaxNumArgs; ++i)
    arg_names_[i] = NULL;
  memset(arg_values_, 0, sizeof(arg_values_));
}

TraceEvent::~TraceEvent() {
}

void TraceEvent::CopyFrom(const TraceEvent& other) {
  timestamp_ = other.timestamp_;
  thread_timestamp_ = other.thread_timestamp_;
  duration_ = other.duration_;
  id_ = other.id_;
  category_group_enabled_ = other.category_group_enabled_;
  name_ = other.name_;
  thread_id_ = other.thread_id_;
  phase_ = other.phase_;
  flags_ = other.flags_;
  parameter_copy_storage_ = other.parameter_copy_storage_;

  for (int i = 0; i < kTraceMaxNumArgs; ++i) {
    arg_names_[i] = other.arg_names_[i];
    arg_types_[i] = other.arg_types_[i];
    arg_values_[i] = other.arg_values_[i];
    convertable_values_[i] = other.convertable_values_[i];
  }
}

void TraceEvent::Initialize(
    int thread_id,
    TimeTicks timestamp,
    TimeTicks thread_timestamp,
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned char flags) {
  timestamp_ = timestamp;
  thread_timestamp_ = thread_timestamp;
  duration_ = TimeDelta::FromInternalValue(-1);
  id_ = id;
  category_group_enabled_ = category_group_enabled;
  name_ = name;
  thread_id_ = thread_id;
  phase_ = phase;
  flags_ = flags;

  // Clamp num_args since it may have been set by a third_party library.
  num_args = (num_args > kTraceMaxNumArgs) ? kTraceMaxNumArgs : num_args;
  int i = 0;
  for (; i < num_args; ++i) {
    arg_names_[i] = arg_names[i];
    arg_types_[i] = arg_types[i];

    if (arg_types[i] == TRACE_VALUE_TYPE_CONVERTABLE)
      convertable_values_[i] = convertable_values[i];
    else
      arg_values_[i].as_uint = arg_values[i];
  }
  for (; i < kTraceMaxNumArgs; ++i) {
    arg_names_[i] = NULL;
    arg_values_[i].as_uint = 0u;
    convertable_values_[i] = NULL;
    arg_types_[i] = TRACE_VALUE_TYPE_UINT;
  }

  bool copy = !!(flags & TRACE_EVENT_FLAG_COPY);
  size_t alloc_size = 0;
  if (copy) {
    alloc_size += GetAllocLength(name);
    for (i = 0; i < num_args; ++i) {
      alloc_size += GetAllocLength(arg_names_[i]);
      if (arg_types_[i] == TRACE_VALUE_TYPE_STRING)
        arg_types_[i] = TRACE_VALUE_TYPE_COPY_STRING;
    }
  }

  bool arg_is_copy[kTraceMaxNumArgs];
  for (i = 0; i < num_args; ++i) {
    // No copying of convertable types, we retain ownership.
    if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
      continue;

    // We only take a copy of arg_vals if they are of type COPY_STRING.
    arg_is_copy[i] = (arg_types_[i] == TRACE_VALUE_TYPE_COPY_STRING);
    if (arg_is_copy[i])
      alloc_size += GetAllocLength(arg_values_[i].as_string);
  }

  if (alloc_size) {
    parameter_copy_storage_ = new RefCountedString;
    parameter_copy_storage_->data().resize(alloc_size);
    char* ptr = string_as_array(&parameter_copy_storage_->data());
    const char* end = ptr + alloc_size;
    if (copy) {
      CopyTraceEventParameter(&ptr, &name_, end);
      for (i = 0; i < num_args; ++i) {
        CopyTraceEventParameter(&ptr, &arg_names_[i], end);
      }
    }
    for (i = 0; i < num_args; ++i) {
      if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
        continue;
      if (arg_is_copy[i])
        CopyTraceEventParameter(&ptr, &arg_values_[i].as_string, end);
    }
    DCHECK_EQ(end, ptr) << "Overrun by " << ptr - end;
  }
}

void TraceEvent::Reset() {
  // Only reset fields that won't be initialized in Initialize(), or that may
  // hold references to other objects.
  duration_ = TimeDelta::FromInternalValue(-1);
  parameter_copy_storage_ = NULL;
  for (int i = 0; i < kTraceMaxNumArgs; ++i)
    convertable_values_[i] = NULL;
}

void TraceEvent::UpdateDuration(const TimeTicks& now,
                                const TimeTicks& thread_now) {
  DCHECK(duration_.ToInternalValue() == -1);
  duration_ = now - timestamp_;
  thread_duration_ = thread_now - thread_timestamp_;
}

// static
void TraceEvent::AppendValueAsJSON(unsigned char type,
                                   TraceEvent::TraceValue value,
                                   std::string* out) {
  switch (type) {
    case TRACE_VALUE_TYPE_BOOL:
      *out += value.as_bool ? "true" : "false";
      break;
    case TRACE_VALUE_TYPE_UINT:
      StringAppendF(out, "%" PRIu64, static_cast<uint64>(value.as_uint));
      break;
    case TRACE_VALUE_TYPE_INT:
      StringAppendF(out, "%" PRId64, static_cast<int64>(value.as_int));
      break;
    case TRACE_VALUE_TYPE_DOUBLE: {
      // FIXME: base/json/json_writer.cc is using the same code,
      //        should be made into a common method.
      std::string real;
      double val = value.as_double;
      if (IsFinite(val)) {
        real = DoubleToString(val);
        // Ensure that the number has a .0 if there's no decimal or 'e'.  This
        // makes sure that when we read the JSON back, it's interpreted as a
        // real rather than an int.
        if (real.find('.') == std::string::npos &&
            real.find('e') == std::string::npos &&
            real.find('E') == std::string::npos) {
          real.append(".0");
        }
        // The JSON spec requires that non-integer values in the range (-1,1)
        // have a zero before the decimal point - ".52" is not valid, "0.52" is.
        if (real[0] == '.') {
          real.insert(0, "0");
        } else if (real.length() > 1 && real[0] == '-' && real[1] == '.') {
          // "-.1" bad "-0.1" good
          real.insert(1, "0");
        }
      } else if (IsNaN(val)) {
        // The JSON spec doesn't allow NaN and Infinity (since these are
        // objects in EcmaScript).  Use strings instead.
        real = "\"NaN\"";
      } else if (val < 0) {
        real = "\"-Infinity\"";
      } else {
        real = "\"Infinity\"";
      }
      StringAppendF(out, "%s", real.c_str());
      break;
    }
    case TRACE_VALUE_TYPE_POINTER:
      // JSON only supports double and int numbers.
      // So as not to lose bits from a 64-bit pointer, output as a hex string.
      StringAppendF(out, "\"0x%" PRIx64 "\"", static_cast<uint64>(
                                     reinterpret_cast<intptr_t>(
                                     value.as_pointer)));
      break;
    case TRACE_VALUE_TYPE_STRING:
    case TRACE_VALUE_TYPE_COPY_STRING:
      EscapeJSONString(value.as_string ? value.as_string : "NULL", true, out);
      break;
    default:
      NOTREACHED() << "Don't know how to print this value";
      break;
  }
}

void TraceEvent::AppendAsJSON(std::string* out) const {
  int64 time_int64 = timestamp_.ToInternalValue();
  int process_id = TraceLog::GetInstance()->process_id();
  // Category group checked at category creation time.
  DCHECK(!strchr(name_, '"'));
  StringAppendF(out,
      "{\"cat\":\"%s\",\"pid\":%i,\"tid\":%i,\"ts\":%" PRId64 ","
      "\"ph\":\"%c\",\"name\":\"%s\",\"args\":{",
      TraceLog::GetCategoryGroupName(category_group_enabled_),
      process_id,
      thread_id_,
      time_int64,
      phase_,
      name_);

  // Output argument names and values, stop at first NULL argument name.
  for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
    if (i > 0)
      *out += ",";
    *out += "\"";
    *out += arg_names_[i];
    *out += "\":";

    if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
      convertable_values_[i]->AppendAsTraceFormat(out);
    else
      AppendValueAsJSON(arg_types_[i], arg_values_[i], out);
  }
  *out += "}";

  if (phase_ == TRACE_EVENT_PHASE_COMPLETE) {
    int64 duration = duration_.ToInternalValue();
    if (duration != -1)
      StringAppendF(out, ",\"dur\":%" PRId64, duration);
    if (!thread_timestamp_.is_null()) {
      int64 thread_duration = thread_duration_.ToInternalValue();
      if (thread_duration != -1)
        StringAppendF(out, ",\"tdur\":%" PRId64, thread_duration);
    }
  }

  // Output tts if thread_timestamp is valid.
  if (!thread_timestamp_.is_null()) {
    int64 thread_time_int64 = thread_timestamp_.ToInternalValue();
    StringAppendF(out, ",\"tts\":%" PRId64, thread_time_int64);
  }
  // If id_ is set, print it out as a hex string so we don't lose any
  // bits (it might be a 64-bit pointer).
  if (flags_ & TRACE_EVENT_FLAG_HAS_ID)
    StringAppendF(out, ",\"id\":\"0x%" PRIx64 "\"", static_cast<uint64>(id_));

  // Instant events also output their scope.
  if (phase_ == TRACE_EVENT_PHASE_INSTANT) {
    char scope = '?';
    switch (flags_ & TRACE_EVENT_FLAG_SCOPE_MASK) {
      case TRACE_EVENT_SCOPE_GLOBAL:
        scope = TRACE_EVENT_SCOPE_NAME_GLOBAL;
        break;

      case TRACE_EVENT_SCOPE_PROCESS:
        scope = TRACE_EVENT_SCOPE_NAME_PROCESS;
        break;

      case TRACE_EVENT_SCOPE_THREAD:
        scope = TRACE_EVENT_SCOPE_NAME_THREAD;
        break;
    }
    StringAppendF(out, ",\"s\":\"%c\"", scope);
  }

  *out += "}";
}
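
// Example output of AppendAsJSON() (a sketch with hypothetical values): a
// COMPLETE ('X') event named "OnPaint" in category "cc" with one integer
// argument would serialize roughly as
//   {"cat":"cc","pid":1234,"tid":5678,"ts":1042,"ph":"X","name":"OnPaint",
//    "args":{"layer_count":7},"dur":250,"tts":900,"tdur":200}
// where "ts"/"dur" come from the wall-clock TimeTicks and "tts"/"tdur" are
// emitted only when the thread timestamp is valid.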

void TraceEvent::AppendPrettyPrinted(std::ostringstream* out) const {
  *out << name_ << "[";
  *out << TraceLog::GetCategoryGroupName(category_group_enabled_);
  *out << "]";
  if (arg_names_[0]) {
    *out << ", {";
    for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
      if (i > 0)
        *out << ", ";
      *out << arg_names_[i] << ":";
      std::string value_as_text;

      if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
        convertable_values_[i]->AppendAsTraceFormat(&value_as_text);
      else
        AppendValueAsJSON(arg_types_[i], arg_values_[i], &value_as_text);

      *out << value_as_text;
    }
    *out << "}";
  }
}

////////////////////////////////////////////////////////////////////////////////
//
// TraceResultBuffer
//
////////////////////////////////////////////////////////////////////////////////

TraceResultBuffer::OutputCallback
    TraceResultBuffer::SimpleOutput::GetCallback() {
  return Bind(&SimpleOutput::Append, Unretained(this));
}

void TraceResultBuffer::SimpleOutput::Append(
    const std::string& json_trace_output) {
  json_output += json_trace_output;
}

TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {
}

TraceResultBuffer::~TraceResultBuffer() {
}

void TraceResultBuffer::SetOutputCallback(
    const OutputCallback& json_chunk_callback) {
  output_callback_ = json_chunk_callback;
}

void TraceResultBuffer::Start() {
  append_comma_ = false;
  output_callback_.Run("[");
}

void TraceResultBuffer::AddFragment(const std::string& trace_fragment) {
  if (append_comma_)
    output_callback_.Run(",");
  append_comma_ = true;
  output_callback_.Run(trace_fragment);
}

void TraceResultBuffer::Finish() {
  output_callback_.Run("]");
}
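
// Typical use of TraceResultBuffer (a sketch, assuming single-threaded use):
//   TraceResultBuffer buffer;
//   TraceResultBuffer::SimpleOutput output;
//   buffer.SetOutputCallback(output.GetCallback());
//   buffer.Start();
//   buffer.AddFragment("{\"ph\":\"B\"}");
//   buffer.AddFragment("{\"ph\":\"E\"}");
//   buffer.Finish();
// output.json_output then holds [{"ph":"B"},{"ph":"E"}]: the fragments joined
// with commas and wrapped in brackets.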

////////////////////////////////////////////////////////////////////////////////
//
// TraceSamplingThread
//
////////////////////////////////////////////////////////////////////////////////
class TraceBucketData;
typedef base::Callback<void(TraceBucketData*)> TraceSampleCallback;

class TraceBucketData {
 public:
  TraceBucketData(base::subtle::AtomicWord* bucket,
                  const char* name,
                  TraceSampleCallback callback);
  ~TraceBucketData();

  TRACE_EVENT_API_ATOMIC_WORD* bucket;
  const char* bucket_name;
  TraceSampleCallback callback;
};

// This object must be created on the IO thread.
class TraceSamplingThread : public PlatformThread::Delegate {
 public:
  TraceSamplingThread();
  virtual ~TraceSamplingThread();

  // Implementation of PlatformThread::Delegate:
  virtual void ThreadMain() OVERRIDE;

  static void DefaultSamplingCallback(TraceBucketData* bucket_data);

  void Stop();
  void WaitSamplingEventForTesting();

 private:
  friend class TraceLog;

  void GetSamples();
  // Not thread-safe. Once ThreadMain() has been called, this can no longer
  // be called.
  void RegisterSampleBucket(TRACE_EVENT_API_ATOMIC_WORD* bucket,
                            const char* const name,
                            TraceSampleCallback callback);
  // Splits a combined "category\0name" into the two component parts.
  static void ExtractCategoryAndName(const char* combined,
                                     const char** category,
                                     const char** name);
  std::vector<TraceBucketData> sample_buckets_;
  bool thread_running_;
  CancellationFlag cancellation_flag_;
  WaitableEvent waitable_event_for_testing_;
};


TraceSamplingThread::TraceSamplingThread()
    : thread_running_(false),
      waitable_event_for_testing_(false, false) {
}

TraceSamplingThread::~TraceSamplingThread() {
}

void TraceSamplingThread::ThreadMain() {
  PlatformThread::SetName("Sampling Thread");
  thread_running_ = true;
  const int kSamplingFrequencyMicroseconds = 1000;
  while (!cancellation_flag_.IsSet()) {
    PlatformThread::Sleep(
        TimeDelta::FromMicroseconds(kSamplingFrequencyMicroseconds));
    GetSamples();
    waitable_event_for_testing_.Signal();
  }
}

// static
void TraceSamplingThread::DefaultSamplingCallback(
    TraceBucketData* bucket_data) {
  TRACE_EVENT_API_ATOMIC_WORD category_and_name =
      TRACE_EVENT_API_ATOMIC_LOAD(*bucket_data->bucket);
  if (!category_and_name)
    return;
  const char* const combined =
      reinterpret_cast<const char* const>(category_and_name);
  const char* category_group;
  const char* name;
  ExtractCategoryAndName(combined, &category_group, &name);
  TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_SAMPLE,
      TraceLog::GetCategoryGroupEnabled(category_group),
      name, 0, 0, NULL, NULL, NULL, NULL, 0);
}

void TraceSamplingThread::GetSamples() {
  for (size_t i = 0; i < sample_buckets_.size(); ++i) {
    TraceBucketData* bucket_data = &sample_buckets_[i];
    bucket_data->callback.Run(bucket_data);
  }
}

void TraceSamplingThread::RegisterSampleBucket(
    TRACE_EVENT_API_ATOMIC_WORD* bucket,
    const char* const name,
    TraceSampleCallback callback) {
  // Access to sample_buckets_ doesn't race with the sampling thread, because
  // RegisterSampleBucket is guaranteed to be called before the sampling
  // thread is created.
  DCHECK(!thread_running_);
  sample_buckets_.push_back(TraceBucketData(bucket, name, callback));
}

// static
void TraceSamplingThread::ExtractCategoryAndName(const char* combined,
                                                 const char** category,
                                                 const char** name) {
  *category = combined;
  *name = &combined[strlen(combined) + 1];
}
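
// Example (a sketch): given combined = "cc\0DrawLayer", i.e. two
// NUL-terminated strings packed back to back as stored in a sampling bucket,
// ExtractCategoryAndName() sets *category to "cc" and *name to "DrawLayer"
// by skipping past the first string's terminator.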

void TraceSamplingThread::Stop() {
  cancellation_flag_.Set();
}

void TraceSamplingThread::WaitSamplingEventForTesting() {
  waitable_event_for_testing_.Wait();
}

TraceBucketData::TraceBucketData(base::subtle::AtomicWord* bucket,
                                 const char* name,
                                 TraceSampleCallback callback)
    : bucket(bucket),
      bucket_name(name),
      callback(callback) {
}

TraceBucketData::~TraceBucketData() {
}

////////////////////////////////////////////////////////////////////////////////
//
// TraceLog
//
////////////////////////////////////////////////////////////////////////////////

class TraceLog::ThreadLocalEventBuffer
    : public MessageLoop::DestructionObserver {
 public:
  ThreadLocalEventBuffer(TraceLog* trace_log);
  virtual ~ThreadLocalEventBuffer();

  TraceEvent* AddTraceEvent(TraceEventHandle* handle);

  void ReportOverhead(const TimeTicks& event_timestamp,
                      const TimeTicks& event_thread_timestamp);

  TraceEvent* GetEventByHandle(TraceEventHandle handle) {
    if (!chunk_ || handle.chunk_seq != chunk_->seq() ||
        handle.chunk_index != chunk_index_)
      return NULL;

    return chunk_->GetEventAt(handle.event_index);
  }

  int generation() const { return generation_; }

 private:
  // MessageLoop::DestructionObserver
  virtual void WillDestroyCurrentMessageLoop() OVERRIDE;

  void FlushWhileLocked();

  void CheckThisIsCurrentBuffer() const {
    DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
  }

  // Since TraceLog is a leaky singleton, trace_log_ will always be valid
  // as long as the thread exists.
  TraceLog* trace_log_;
  scoped_ptr<TraceBufferChunk> chunk_;
  size_t chunk_index_;
  int event_count_;
  TimeDelta overhead_;
  int generation_;

  DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer);
};

TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
    : trace_log_(trace_log),
      chunk_index_(0),
      event_count_(0),
      generation_(trace_log->generation()) {
  // ThreadLocalEventBuffer is created only if the thread has a message loop, so
  // the following message_loop won't be NULL.
  MessageLoop* message_loop = MessageLoop::current();
  message_loop->AddDestructionObserver(this);

  AutoLock lock(trace_log->lock_);
  trace_log->thread_message_loops_.insert(message_loop);
}

TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
  CheckThisIsCurrentBuffer();
  MessageLoop::current()->RemoveDestructionObserver(this);

  // Zero event_count_ happens in any of the following cases:
  // - no event was generated for the thread;
  // - the thread has no message loop;
  // - trace_event_overhead is disabled.
  if (event_count_) {
    InitializeMetadataEvent(AddTraceEvent(NULL),
                            static_cast<int>(base::PlatformThread::CurrentId()),
                            "overhead", "average_overhead",
                            overhead_.InMillisecondsF() / event_count_);
  }

  {
    AutoLock lock(trace_log_->lock_);
    FlushWhileLocked();
    trace_log_->thread_message_loops_.erase(MessageLoop::current());
  }
  trace_log_->thread_local_event_buffer_.Set(NULL);
}

TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
    TraceEventHandle* handle) {
  CheckThisIsCurrentBuffer();

  if (chunk_ && chunk_->IsFull()) {
    AutoLock lock(trace_log_->lock_);
    FlushWhileLocked();
    chunk_.reset();
  }
  if (!chunk_) {
    AutoLock lock(trace_log_->lock_);
    chunk_ = trace_log_->logged_events_->GetChunk(&chunk_index_);
    trace_log_->CheckIfBufferIsFullWhileLocked();
  }
  if (!chunk_)
    return NULL;

  size_t event_index;
  TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index);
  if (trace_event && handle)
    MakeHandle(chunk_->seq(), chunk_index_, event_index, handle);

  return trace_event;
}

void TraceLog::ThreadLocalEventBuffer::ReportOverhead(
    const TimeTicks& event_timestamp,
    const TimeTicks& event_thread_timestamp) {
  if (!g_category_group_enabled[g_category_trace_event_overhead])
    return;

  CheckThisIsCurrentBuffer();

  event_count_++;
  TimeTicks thread_now = ThreadNow();
  TimeTicks now = trace_log_->OffsetNow();
  TimeDelta overhead = now - event_timestamp;
  if (overhead.InMicroseconds() >= kOverheadReportThresholdInMicroseconds) {
    TraceEvent* trace_event = AddTraceEvent(NULL);
    if (trace_event) {
      trace_event->Initialize(
          static_cast<int>(PlatformThread::CurrentId()),
          event_timestamp, event_thread_timestamp,
          TRACE_EVENT_PHASE_COMPLETE,
          &g_category_group_enabled[g_category_trace_event_overhead],
          "overhead", 0, 0, NULL, NULL, NULL, NULL, 0);
      trace_event->UpdateDuration(now, thread_now);
    }
  }
  overhead_ += overhead;
}

void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
  delete this;
}

void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() {
  if (!chunk_)
    return;

  trace_log_->lock_.AssertAcquired();
  if (trace_log_->CheckGeneration(generation_)) {
    // Return the chunk to the buffer only if the generation matches.
    trace_log_->logged_events_->ReturnChunk(chunk_index_, chunk_.Pass());
  }
  // Otherwise this method may be called from the destructor, or TraceLog will
  // find the generation mismatch and delete this buffer soon.
}

// static
TraceLog* TraceLog::GetInstance() {
  return Singleton<TraceLog, LeakySingletonTraits<TraceLog> >::get();
}

TraceLog::TraceLog()
    : mode_(DISABLED),
      num_traces_recorded_(0),
      event_callback_(0),
      dispatching_to_observer_list_(false),
      process_sort_index_(0),
      process_id_hash_(0),
      process_id_(0),
      watch_category_(0),
      trace_options_(RECORD_UNTIL_FULL),
      sampling_thread_handle_(0),
      category_filter_(CategoryFilter::kDefaultCategoryFilterString),
      event_callback_category_filter_(
          CategoryFilter::kDefaultCategoryFilterString),
      thread_shared_chunk_index_(0),
      generation_(0) {
  // Trace is enabled or disabled on one thread while other threads are
  // accessing the enabled flag. We don't care whether edge-case events are
  // traced or not, so we allow races on the enabled flag to keep the trace
  // macros fast.
  // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
  // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
  //                            sizeof(g_category_group_enabled),
  //                           "trace_event category enabled");
  for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) {
    ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i],
                         "trace_event category enabled");
  }
#if defined(OS_NACL)  // NaCl shouldn't expose the process id.
  SetProcessID(0);
#else
  SetProcessID(static_cast<int>(GetCurrentProcId()));

  // NaCl also shouldn't access the command line.
  if (CommandLine::InitializedForCurrentProcess() &&
      CommandLine::ForCurrentProcess()->HasSwitch(switches::kTraceToConsole)) {
    std::string filter = CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
        switches::kTraceToConsole);
    if (filter.empty()) {
      filter = kEchoToConsoleCategoryFilter;
    } else {
      filter.append(",");
      filter.append(kEchoToConsoleCategoryFilter);
    }

    LOG(ERROR) << "Start " << switches::kTraceToConsole
               << " with CategoryFilter '" << filter << "'.";
    SetEnabled(CategoryFilter(filter), RECORDING_MODE, ECHO_TO_CONSOLE);
  }
#endif

  logged_events_.reset(CreateTraceBuffer());
}

TraceLog::~TraceLog() {
}

const unsigned char* TraceLog::GetCategoryGroupEnabled(
    const char* category_group) {
  TraceLog* tracelog = GetInstance();
  if (!tracelog) {
    DCHECK(!g_category_group_enabled[g_category_already_shutdown]);
    return &g_category_group_enabled[g_category_already_shutdown];
  }
  return tracelog->GetCategoryGroupEnabledInternal(category_group);
}

const char* TraceLog::GetCategoryGroupName(
    const unsigned char* category_group_enabled) {
  // Calculate the index of the category group by finding
  // category_group_enabled in g_category_group_enabled array.
  uintptr_t category_begin =
      reinterpret_cast<uintptr_t>(g_category_group_enabled);
  uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled);
  DCHECK(category_ptr >= category_begin &&
         category_ptr < reinterpret_cast<uintptr_t>(
             g_category_group_enabled + MAX_CATEGORY_GROUPS)) <<
      "out of bounds category pointer";
  uintptr_t category_index =
      (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
  return g_category_groups[category_index];
}

void TraceLog::UpdateCategoryGroupEnabledFlag(int category_index) {
  unsigned char enabled_flag = 0;
  const char* category_group = g_category_groups[category_index];
  if (mode_ == RECORDING_MODE &&
      category_filter_.IsCategoryGroupEnabled(category_group))
    enabled_flag |= ENABLED_FOR_RECORDING;
  else if (mode_ == MONITORING_MODE &&
      category_filter_.IsCategoryGroupEnabled(category_group))
    enabled_flag |= ENABLED_FOR_MONITORING;
  if (event_callback_ &&
      event_callback_category_filter_.IsCategoryGroupEnabled(category_group))
    enabled_flag |= ENABLED_FOR_EVENT_CALLBACK;
  g_category_group_enabled[category_index] = enabled_flag;
}

void TraceLog::UpdateCategoryGroupEnabledFlags() {
  int category_index = base::subtle::NoBarrier_Load(&g_category_index);
  for (int i = 0; i < category_index; i++)
    UpdateCategoryGroupEnabledFlag(i);
}

void TraceLog::UpdateSyntheticDelaysFromCategoryFilter() {
  ResetTraceEventSyntheticDelays();
  const CategoryFilter::StringList& delays =
      category_filter_.GetSyntheticDelayValues();
  CategoryFilter::StringList::const_iterator ci;
  for (ci = delays.begin(); ci != delays.end(); ++ci) {
    StringTokenizer tokens(*ci, ";");
    if (!tokens.GetNext())
      continue;
    TraceEventSyntheticDelay* delay =
        TraceEventSyntheticDelay::Lookup(tokens.token());
    while (tokens.GetNext()) {
      std::string token = tokens.token();
      char* duration_end;
      double target_duration = strtod(token.c_str(), &duration_end);
      if (duration_end != token.c_str()) {
        delay->SetTargetDuration(
            TimeDelta::FromMicroseconds(target_duration * 1e6));
      } else if (token == "static") {
        delay->SetMode(TraceEventSyntheticDelay::STATIC);
      } else if (token == "oneshot") {
        delay->SetMode(TraceEventSyntheticDelay::ONE_SHOT);
      } else if (token == "alternating") {
        delay->SetMode(TraceEventSyntheticDelay::ALTERNATING);
      }
    }
  }
}
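
// Example (a sketch): a synthetic-delay filter entry such as
// "DELAY(test.Delay;0.016;oneshot)" is tokenized on ';' into the delay name
// "test.Delay", a target duration of 0.016 s (the strtod value is converted
// via FromMicroseconds(value * 1e6)), and the "oneshot" mode.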

const unsigned char* TraceLog::GetCategoryGroupEnabledInternal(
    const char* category_group) {
  DCHECK(!strchr(category_group, '"')) <<
      "Category groups may not contain double quote";
  // g_category_groups is append-only, so avoid using a lock for the fast path.
  int current_category_index = base::subtle::Acquire_Load(&g_category_index);

  // Search for pre-existing category group.
  for (int i = 0; i < current_category_index; ++i) {
    if (strcmp(g_category_groups[i], category_group) == 0) {
      return &g_category_group_enabled[i];
    }
  }

  unsigned char* category_group_enabled = NULL;
  // This is the slow path: the lock is not held in the case above, so more
  // than one thread could have reached here trying to add the same category.
  // Only hold the lock when actually appending a new category, and
  // check the category groups again.
1281   AutoLock lock(lock_);
1282   int category_index = base::subtle::Acquire_Load(&g_category_index);
1283   for (int i = 0; i < category_index; ++i) {
1284     if (strcmp(g_category_groups[i], category_group) == 0) {
1285       return &g_category_group_enabled[i];
1286     }
1287   }
1288
1289   // Create a new category group.
1290   DCHECK(category_index < MAX_CATEGORY_GROUPS) <<
1291       "must increase MAX_CATEGORY_GROUPS";
1292   if (category_index < MAX_CATEGORY_GROUPS) {
1293     // Don't hold on to the category_group pointer, so that we can create
1294     // category groups with strings not known at compile time (this is
1295     // required by SetWatchEvent).
1296     const char* new_group = strdup(category_group);
1297     ANNOTATE_LEAKING_OBJECT_PTR(new_group);
1298     g_category_groups[category_index] = new_group;
1299     DCHECK(!g_category_group_enabled[category_index]);
1300     // Note that if both included and excluded patterns in the
1301     // CategoryFilter are empty, we exclude nothing,
1302     // thereby enabling this category group.
1303     UpdateCategoryGroupEnabledFlag(category_index);
1304     category_group_enabled = &g_category_group_enabled[category_index];
1305     // Update the max index now.
1306     base::subtle::Release_Store(&g_category_index, category_index + 1);
1307   } else {
1308     category_group_enabled =
1309         &g_category_group_enabled[g_category_categories_exhausted];
1310   }
1311   return category_group_enabled;
1312 }
1313
1314 void TraceLog::GetKnownCategoryGroups(
1315     std::vector<std::string>* category_groups) {
1316   AutoLock lock(lock_);
1317   category_groups->push_back(
1318       g_category_groups[g_category_trace_event_overhead]);
1319   int category_index = base::subtle::NoBarrier_Load(&g_category_index);
1320   for (int i = g_num_builtin_categories; i < category_index; i++)
1321     category_groups->push_back(g_category_groups[i]);
1322 }
1323
1324 void TraceLog::SetEnabled(const CategoryFilter& category_filter,
1325                           Mode mode,
1326                           Options options) {
1327   std::vector<EnabledStateObserver*> observer_list;
1328   {
1329     AutoLock lock(lock_);
1330
1331     // Can't enable tracing when Flush() is in progress.
1332     DCHECK(!flush_message_loop_proxy_.get());
1333
1334     Options old_options = trace_options();
1335
1336     if (IsEnabled()) {
1337       if (options != old_options) {
1338         DLOG(ERROR) << "Attempting to re-enable tracing with a different "
1339                     << "set of options.";
1340       }
1341
1342       if (mode != mode_) {
1343         DLOG(ERROR) << "Attempting to re-enable tracing with a different mode.";
1344       }
1345
1346       category_filter_.Merge(category_filter);
1347       UpdateCategoryGroupEnabledFlags();
1348       return;
1349     }
1350
1351     if (dispatching_to_observer_list_) {
1352       DLOG(ERROR) <<
1353           "Cannot manipulate TraceLog::Enabled state from an observer.";
1354       return;
1355     }
1356
1357     mode_ = mode;
1358
1359     if (options != old_options) {
1360       subtle::NoBarrier_Store(&trace_options_, options);
1361       UseNextTraceBuffer();
1362     }
1363
1364     num_traces_recorded_++;
1365
1366     category_filter_ = CategoryFilter(category_filter);
1367     UpdateCategoryGroupEnabledFlags();
1368     UpdateSyntheticDelaysFromCategoryFilter();
1369
1370     if (options & ENABLE_SAMPLING) {
1371       sampling_thread_.reset(new TraceSamplingThread);
1372       sampling_thread_->RegisterSampleBucket(
1373           &g_trace_state[0],
1374           "bucket0",
1375           Bind(&TraceSamplingThread::DefaultSamplingCallback));
1376       sampling_thread_->RegisterSampleBucket(
1377           &g_trace_state[1],
1378           "bucket1",
1379           Bind(&TraceSamplingThread::DefaultSamplingCallback));
1380       sampling_thread_->RegisterSampleBucket(
1381           &g_trace_state[2],
1382           "bucket2",
1383           Bind(&TraceSamplingThread::DefaultSamplingCallback));
1384       if (!PlatformThread::Create(
1385             0, sampling_thread_.get(), &sampling_thread_handle_)) {
1386         DCHECK(false) << "failed to create thread";
1387       }
1388     }
1389
1390     dispatching_to_observer_list_ = true;
1391     observer_list = enabled_state_observer_list_;
1392   }
1393   // Notify observers outside the lock in case they trigger trace events.
1394   for (size_t i = 0; i < observer_list.size(); ++i)
1395     observer_list[i]->OnTraceLogEnabled();
1396
1397   {
1398     AutoLock lock(lock_);
1399     dispatching_to_observer_list_ = false;
1400   }
1401 }
1402
1403 CategoryFilter TraceLog::GetCurrentCategoryFilter() {
1404   AutoLock lock(lock_);
1405   return category_filter_;
1406 }
1407
1408 void TraceLog::SetDisabled() {
1409   AutoLock lock(lock_);
1410   SetDisabledWhileLocked();
1411 }
1412
1413 void TraceLog::SetDisabledWhileLocked() {
1414   lock_.AssertAcquired();
1415
1416   if (!IsEnabled())
1417     return;
1418
1419   if (dispatching_to_observer_list_) {
1420     DLOG(ERROR)
1421         << "Cannot manipulate TraceLog::Enabled state from an observer.";
1422     return;
1423   }
1424
1425   mode_ = DISABLED;
1426
1427   if (sampling_thread_.get()) {
1428     // Stop the sampling thread.
1429     sampling_thread_->Stop();
1430     lock_.Release();
1431     PlatformThread::Join(sampling_thread_handle_);
1432     lock_.Acquire();
1433     sampling_thread_handle_ = PlatformThreadHandle();
1434     sampling_thread_.reset();
1435   }
1436
1437   category_filter_.Clear();
1438   subtle::NoBarrier_Store(&watch_category_, 0);
1439   watch_event_name_ = "";
1440   UpdateCategoryGroupEnabledFlags();
1441   AddMetadataEventsWhileLocked();
1442
1443   dispatching_to_observer_list_ = true;
1444   std::vector<EnabledStateObserver*> observer_list =
1445       enabled_state_observer_list_;
1446
1447   {
1448     // Dispatch to observers outside the lock in case the observer triggers a
1449     // trace event.
1450     AutoUnlock unlock(lock_);
1451     for (size_t i = 0; i < observer_list.size(); ++i)
1452       observer_list[i]->OnTraceLogDisabled();
1453   }
1454   dispatching_to_observer_list_ = false;
1455 }
1456
1457 int TraceLog::GetNumTracesRecorded() {
1458   AutoLock lock(lock_);
1459   if (!IsEnabled())
1460     return -1;
1461   return num_traces_recorded_;
1462 }
1463
1464 void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) {
1465   enabled_state_observer_list_.push_back(listener);
1466 }
1467
1468 void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver* listener) {
1469   std::vector<EnabledStateObserver*>::iterator it =
1470       std::find(enabled_state_observer_list_.begin(),
1471                 enabled_state_observer_list_.end(),
1472                 listener);
1473   if (it != enabled_state_observer_list_.end())
1474     enabled_state_observer_list_.erase(it);
1475 }
1476
1477 bool TraceLog::HasEnabledStateObserver(EnabledStateObserver* listener) const {
1478   std::vector<EnabledStateObserver*>::const_iterator it =
1479       std::find(enabled_state_observer_list_.begin(),
1480                 enabled_state_observer_list_.end(),
1481                 listener);
1482   return it != enabled_state_observer_list_.end();
1483 }
1484
1485 float TraceLog::GetBufferPercentFull() const {
1486   AutoLock lock(lock_);
1487   return static_cast<float>(static_cast<double>(logged_events_->Size()) /
1488                             logged_events_->Capacity());
1489 }
1490
1491 bool TraceLog::BufferIsFull() const {
1492   AutoLock lock(lock_);
1493   return logged_events_->IsFull();
1494 }
1495
1496 TraceBuffer* TraceLog::CreateTraceBuffer() {
1497   Options options = trace_options();
1498   if (options & RECORD_CONTINUOUSLY)
1499     return new TraceBufferRingBuffer(kTraceEventRingBufferChunks);
1500   else if ((options & ENABLE_SAMPLING) && mode_ == MONITORING_MODE)
1501     return new TraceBufferRingBuffer(kMonitorTraceEventBufferChunks);
1502   else if (options & ECHO_TO_CONSOLE)
1503     return new TraceBufferRingBuffer(kEchoToConsoleTraceEventBufferChunks);
1504   return new TraceBufferVector();
1505 }
1506
1507 TraceEvent* TraceLog::AddEventToThreadSharedChunkWhileLocked(
1508     TraceEventHandle* handle, bool check_buffer_is_full) {
1509   lock_.AssertAcquired();
1510
1511   if (thread_shared_chunk_ && thread_shared_chunk_->IsFull()) {
1512     logged_events_->ReturnChunk(thread_shared_chunk_index_,
1513                                 thread_shared_chunk_.Pass());
1514   }
1515
1516   if (!thread_shared_chunk_) {
1517     thread_shared_chunk_ = logged_events_->GetChunk(
1518         &thread_shared_chunk_index_);
1519     if (check_buffer_is_full)
1520       CheckIfBufferIsFullWhileLocked();
1521   }
1522   if (!thread_shared_chunk_)
1523     return NULL;
1524
1525   size_t event_index;
1526   TraceEvent* trace_event = thread_shared_chunk_->AddTraceEvent(&event_index);
1527   if (trace_event && handle) {
1528     MakeHandle(thread_shared_chunk_->seq(), thread_shared_chunk_index_,
1529                event_index, handle);
1530   }
1531   return trace_event;
1532 }
1533
1534 void TraceLog::CheckIfBufferIsFullWhileLocked() {
1535   lock_.AssertAcquired();
1536   if (logged_events_->IsFull())
1537     SetDisabledWhileLocked();
1538 }
1539
1540 void TraceLog::SetEventCallbackEnabled(const CategoryFilter& category_filter,
1541                                        EventCallback cb) {
1542   AutoLock lock(lock_);
1543   subtle::NoBarrier_Store(&event_callback_,
1544                           reinterpret_cast<subtle::AtomicWord>(cb));
1545   event_callback_category_filter_ = category_filter;
1546   UpdateCategoryGroupEnabledFlags();
1547 }
1548
1549 void TraceLog::SetEventCallbackDisabled() {
1550   AutoLock lock(lock_);
1551   subtle::NoBarrier_Store(&event_callback_, 0);
1552   UpdateCategoryGroupEnabledFlags();
1553 }
1554
1555 // Flush() works as follows:
1556 // 1. Flush() is called on threadA, whose message loop is saved in
1557 //    flush_message_loop_proxy_;
1558 // 2. If thread_message_loops_ is not empty, threadA posts a task to each
1559 //    message loop to flush the thread local buffers; otherwise finish the flush;
1560 // 3. FlushCurrentThread() deletes the thread local event buffer:
1561 //    - The thread's last batch of events is flushed into the main buffer;
1562 //    - The message loop is removed from thread_message_loops_;
1563 //    If this is the last message loop, finish the flush;
1564 // 4. If any thread hasn't finished its flush in time, finish the flush.
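//
// A minimal usage sketch (illustrative only; OnTraceData is a hypothetical
// caller-supplied function, not part of this file):
//   void OnTraceData(const scoped_refptr<base::RefCountedString>& json,
//                    bool has_more_events);
//   ...
//   TraceLog::GetInstance()->SetDisabled();
//   TraceLog::GetInstance()->Flush(base::Bind(&OnTraceData));
// The callback may be invoked several times, until has_more_events is false.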
1565 void TraceLog::Flush(const TraceLog::OutputCallback& cb) {
1566   if (IsEnabled()) {
1567     // Can't flush when tracing is enabled because otherwise PostTask would
1568     // - generate more trace events;
1569     // - deschedule the calling thread on some platforms causing inaccurate
1570     //   timing of the trace events.
1571     scoped_refptr<RefCountedString> empty_result = new RefCountedString;
1572     if (!cb.is_null())
1573       cb.Run(empty_result, false);
1574     LOG(WARNING) << "Ignored TraceLog::Flush called when tracing is enabled";
1575     return;
1576   }
1577
1578   int generation = this->generation();
1579   {
1580     AutoLock lock(lock_);
1581     DCHECK(!flush_message_loop_proxy_.get());
1582     flush_message_loop_proxy_ = MessageLoopProxy::current();
1583     DCHECK(!thread_message_loops_.size() || flush_message_loop_proxy_.get());
1584     flush_output_callback_ = cb;
1585
1586     if (thread_shared_chunk_) {
1587       logged_events_->ReturnChunk(thread_shared_chunk_index_,
1588                                   thread_shared_chunk_.Pass());
1589     }
1590
1591     if (thread_message_loops_.size()) {
1592       for (hash_set<MessageLoop*>::const_iterator it =
1593            thread_message_loops_.begin();
1594            it != thread_message_loops_.end(); ++it) {
1595         (*it)->PostTask(
1596             FROM_HERE,
1597             Bind(&TraceLog::FlushCurrentThread, Unretained(this), generation));
1598       }
1599       flush_message_loop_proxy_->PostDelayedTask(
1600           FROM_HERE,
1601           Bind(&TraceLog::OnFlushTimeout, Unretained(this), generation),
1602           TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs));
1603       return;
1604     }
1605   }
1606
1607   FinishFlush(generation);
1608 }
1609
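// Streams the buffered events out as a comma-separated sequence of JSON
// objects (the trace event format, roughly {"ph":"X","name":...,"ts":...}),
// batching kTraceEventBatchChunks chunks per callback invocation so a very
// large trace is not serialized into one giant string.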
1610 void TraceLog::ConvertTraceEventsToTraceFormat(
1611     scoped_ptr<TraceBuffer> logged_events,
1612     const TraceLog::OutputCallback& flush_output_callback) {
1613
1614   if (flush_output_callback.is_null())
1615     return;
1616
1617   // The callback needs to be called at least once even if there are no events,
1618   // to let the caller know that the flush has completed.
1619   bool has_more_events = true;
1620   do {
1621     scoped_refptr<RefCountedString> json_events_str_ptr =
1622         new RefCountedString();
1623
1624     for (size_t i = 0; i < kTraceEventBatchChunks; ++i) {
1625       const TraceBufferChunk* chunk = logged_events->NextChunk();
1626       if (!chunk) {
1627         has_more_events = false;
1628         break;
1629       }
1630       for (size_t j = 0; j < chunk->size(); ++j) {
1631         if (i > 0 || j > 0)
1632           json_events_str_ptr->data().append(",");
1633         chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data()));
1634       }
1635     }
1636
1637     flush_output_callback.Run(json_events_str_ptr, has_more_events);
1638   } while (has_more_events);
1639 }
1640
1641 void TraceLog::FinishFlush(int generation) {
1642   scoped_ptr<TraceBuffer> previous_logged_events;
1643   OutputCallback flush_output_callback;
1644
1645   if (!CheckGeneration(generation))
1646     return;
1647
1648   {
1649     AutoLock lock(lock_);
1650
1651     previous_logged_events.swap(logged_events_);
1652     UseNextTraceBuffer();
1653     thread_message_loops_.clear();
1654
1655     flush_message_loop_proxy_ = NULL;
1656     flush_output_callback = flush_output_callback_;
1657     flush_output_callback_.Reset();
1658   }
1659
1660   ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
1661                                   flush_output_callback);
1662 }
1663
1664 // Runs on each thread that holds a local event buffer.
1665 void TraceLog::FlushCurrentThread(int generation) {
1666   {
1667     AutoLock lock(lock_);
1668     if (!CheckGeneration(generation) || !flush_message_loop_proxy_) {
1669       // This task is too late; the corresponding flush has already finished.
1670       return;
1671     }
1672   }
1673
1674   // This will flush the thread local buffer.
1675   delete thread_local_event_buffer_.Get();
1676
1677   AutoLock lock(lock_);
1678   if (!CheckGeneration(generation) || !flush_message_loop_proxy_ ||
1679       thread_message_loops_.size())
1680     return;
1681
1682   flush_message_loop_proxy_->PostTask(
1683       FROM_HERE,
1684       Bind(&TraceLog::FinishFlush, Unretained(this), generation));
1685 }
1686
1687 void TraceLog::OnFlushTimeout(int generation) {
1688   {
1689     AutoLock lock(lock_);
1690     if (!CheckGeneration(generation) || !flush_message_loop_proxy_) {
1691       // Flush has finished before timeout.
1692       return;
1693     }
1694
1695     LOG(WARNING) <<
1696         "The following threads haven't finished flushing in time. "
1697         "If this happens consistently for some thread, please call "
1698         "TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop() from "
1699         "that thread to prevent its trace events from being lost.";
1700     for (hash_set<MessageLoop*>::const_iterator it =
1701          thread_message_loops_.begin();
1702          it != thread_message_loops_.end(); ++it) {
1703       LOG(WARNING) << "Thread: " << (*it)->thread_name();
1704     }
1705   }
1706   FinishFlush(generation);
1707 }
1708
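// Unlike Flush(), this does not swap buffers or require tracing to be
// disabled: it clones the current buffer for iteration, so recording (e.g. in
// monitoring mode) continues undisturbed while the snapshot is serialized.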
1709 void TraceLog::FlushButLeaveBufferIntact(
1710     const TraceLog::OutputCallback& flush_output_callback) {
1711   scoped_ptr<TraceBuffer> previous_logged_events;
1712   {
1713     AutoLock lock(lock_);
1714     AddMetadataEventsWhileLocked();
1715     if (thread_shared_chunk_) {
1716       // Return the chunk to the main buffer to flush the sampling data.
1717       logged_events_->ReturnChunk(thread_shared_chunk_index_,
1718                                   thread_shared_chunk_.Pass());
1719     }
1720     previous_logged_events = logged_events_->CloneForIteration().Pass();
1721   }  // release lock
1722
1723   ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
1724                                   flush_output_callback);
1725 }
1726
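// Bumping generation_ invalidates every outstanding ThreadLocalEventBuffer
// and any still-pending FlushCurrentThread()/OnFlushTimeout() task; they all
// compare their saved generation via CheckGeneration() before touching the
// new buffer.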
1727 void TraceLog::UseNextTraceBuffer() {
1728   logged_events_.reset(CreateTraceBuffer());
1729   subtle::NoBarrier_AtomicIncrement(&generation_, 1);
1730   thread_shared_chunk_.reset();
1731   thread_shared_chunk_index_ = 0;
1732 }
1733
1734 TraceEventHandle TraceLog::AddTraceEvent(
1735     char phase,
1736     const unsigned char* category_group_enabled,
1737     const char* name,
1738     unsigned long long id,
1739     int num_args,
1740     const char** arg_names,
1741     const unsigned char* arg_types,
1742     const unsigned long long* arg_values,
1743     const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
1744     unsigned char flags) {
1745   int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
1746   base::TimeTicks now = base::TimeTicks::NowFromSystemTraceTime();
1747   return AddTraceEventWithThreadIdAndTimestamp(phase, category_group_enabled,
1748                                                name, id, thread_id, now,
1749                                                num_args, arg_names,
1750                                                arg_types, arg_values,
1751                                                convertable_values, flags);
1752 }
1753
1754 TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
1755     char phase,
1756     const unsigned char* category_group_enabled,
1757     const char* name,
1758     unsigned long long id,
1759     int thread_id,
1760     const TimeTicks& timestamp,
1761     int num_args,
1762     const char** arg_names,
1763     const unsigned char* arg_types,
1764     const unsigned long long* arg_values,
1765     const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
1766     unsigned char flags) {
1767   TraceEventHandle handle = { 0, 0, 0 };
1768   if (!*category_group_enabled)
1769     return handle;
1770
1771   // Avoid re-entrance of AddTraceEvent. This may happen in the GPU process when
1772   // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
1773   // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
1774   if (thread_is_in_trace_event_.Get())
1775     return handle;
1776
1777   AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);
1778
1779   DCHECK(name);
1780
1781   if (flags & TRACE_EVENT_FLAG_MANGLE_ID)
1782     id ^= process_id_hash_;
1783
1784   TimeTicks now = OffsetTimestamp(timestamp);
1785   TimeTicks thread_now = ThreadNow();
1786
1787   ThreadLocalEventBuffer* thread_local_event_buffer = NULL;
1788   // A ThreadLocalEventBuffer needs the message loop
1789   // - to know when the thread exits;
1790   // - to handle the final flush.
1791   // For a thread without a message loop, or whose message loop may be blocked,
1792   // trace events are added directly into the main buffer.
1793   if (!thread_blocks_message_loop_.Get() && MessageLoop::current()) {
1794     thread_local_event_buffer = thread_local_event_buffer_.Get();
1795     if (thread_local_event_buffer &&
1796         !CheckGeneration(thread_local_event_buffer->generation())) {
1797       delete thread_local_event_buffer;
1798       thread_local_event_buffer = NULL;
1799     }
1800     if (!thread_local_event_buffer) {
1801       thread_local_event_buffer = new ThreadLocalEventBuffer(this);
1802       thread_local_event_buffer_.Set(thread_local_event_buffer);
1803     }
1804   }
1805
1806   // Check and update the current thread name only if the event is for the
1807   // current thread to avoid locks in most cases.
1808   if (thread_id == static_cast<int>(PlatformThread::CurrentId())) {
1809     const char* new_name = ThreadIdNameManager::GetInstance()->
1810         GetName(thread_id);
1811     // Check if the thread name has been set or changed since the previous
1812     // call (if any), but don't bother if the new name is empty. Note this will
1813     // not detect a thread name change within the same char* buffer address: we
1814     // favor common case performance over corner case correctness.
1815     if (new_name != g_current_thread_name.Get().Get() &&
1816         new_name && *new_name) {
1817       g_current_thread_name.Get().Set(new_name);
1818
1819       AutoLock thread_info_lock(thread_info_lock_);
1820
1821       hash_map<int, std::string>::iterator existing_name =
1822           thread_names_.find(thread_id);
1823       if (existing_name == thread_names_.end()) {
1824         // This is a new thread id, and a new name.
1825         thread_names_[thread_id] = new_name;
1826       } else {
1827         // This is a thread id that we've seen before, but potentially with a
1828         // new name.
1829         std::vector<StringPiece> existing_names;
1830         Tokenize(existing_name->second, ",", &existing_names);
1831         bool found = std::find(existing_names.begin(),
1832                                existing_names.end(),
1833                                new_name) != existing_names.end();
1834         if (!found) {
1835           if (existing_names.size())
1836             existing_name->second.push_back(',');
1837           existing_name->second.append(new_name);
1838         }
1839       }
1840     }
1841   }
1842
1843   std::string console_message;
1844   if (*category_group_enabled &
1845       (ENABLED_FOR_RECORDING | ENABLED_FOR_MONITORING)) {
1846     OptionalAutoLock lock(lock_);
1847
1848     TraceEvent* trace_event = NULL;
1849     if (thread_local_event_buffer) {
1850       trace_event = thread_local_event_buffer->AddTraceEvent(&handle);
1851     } else {
1852       lock.EnsureAcquired();
1853       trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true);
1854     }
1855
1856     if (trace_event) {
1857       trace_event->Initialize(thread_id, now, thread_now, phase,
1858                               category_group_enabled, name, id,
1859                               num_args, arg_names, arg_types, arg_values,
1860                               convertable_values, flags);
1861
1862 #if defined(OS_ANDROID)
1863       trace_event->SendToATrace();
1864 #endif
1865     }
1866
1867     if (trace_options() & ECHO_TO_CONSOLE) {
1868       console_message = EventToConsoleMessage(
1869           phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
1870           timestamp, trace_event);
1871     }
1872   }
1873
1874   if (console_message.size())
1875     LOG(ERROR) << console_message;
1876
1877   if (reinterpret_cast<const unsigned char*>(subtle::NoBarrier_Load(
1878       &watch_category_)) == category_group_enabled) {
1879     bool event_name_matches;
1880     WatchEventCallback watch_event_callback_copy;
1881     {
1882       AutoLock lock(lock_);
1883       event_name_matches = watch_event_name_ == name;
1884       watch_event_callback_copy = watch_event_callback_;
1885     }
1886     if (event_name_matches) {
1887       if (!watch_event_callback_copy.is_null())
1888         watch_event_callback_copy.Run();
1889     }
1890   }
1891
1892   if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
1893     EventCallback event_callback = reinterpret_cast<EventCallback>(
1894         subtle::NoBarrier_Load(&event_callback_));
1895     if (event_callback) {
1896       event_callback(now,
1897                      phase == TRACE_EVENT_PHASE_COMPLETE ?
1898                          TRACE_EVENT_PHASE_BEGIN : phase,
1899                      category_group_enabled, name, id,
1900                      num_args, arg_names, arg_types, arg_values,
1901                      flags);
1902     }
1903   }
1904
1905   if (thread_local_event_buffer)
1906     thread_local_event_buffer->ReportOverhead(now, thread_now);
1907
1908   return handle;
1909 }
1910
1911 // May be called when a COMPLETE event ends and the unfinished event has been
1912 // recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL).
1913 std::string TraceLog::EventToConsoleMessage(unsigned char phase,
1914                                             const TimeTicks& timestamp,
1915                                             TraceEvent* trace_event) {
1916   AutoLock thread_info_lock(thread_info_lock_);
1917
1918   // The caller should translate TRACE_EVENT_PHASE_COMPLETE to
1919   // TRACE_EVENT_PHASE_BEGIN or TRACE_EVENT_PHASE_END.
1920   DCHECK(phase != TRACE_EVENT_PHASE_COMPLETE);
1921
1922   TimeDelta duration;
1923   int thread_id = trace_event ?
1924       trace_event->thread_id() : PlatformThread::CurrentId();
1925   if (phase == TRACE_EVENT_PHASE_END) {
1926     duration = timestamp - thread_event_start_times_[thread_id].top();
1927     thread_event_start_times_[thread_id].pop();
1928   }
1929
1930   std::string thread_name = thread_names_[thread_id];
1931   if (thread_colors_.find(thread_name) == thread_colors_.end())
1932     thread_colors_[thread_name] = (thread_colors_.size() % 6) + 1;
1933
1934   std::ostringstream log;
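  // "\x1b[0;3Nm" (N in 1..6, assigned per thread above) is an ANSI escape
  // sequence selecting a terminal foreground color; "\x1b[0;m" at the end
  // resets it.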
1935   log << base::StringPrintf("%s: \x1b[0;3%dm",
1936                             thread_name.c_str(),
1937                             thread_colors_[thread_name]);
1938
1939   size_t depth = 0;
1940   if (thread_event_start_times_.find(thread_id) !=
1941       thread_event_start_times_.end())
1942     depth = thread_event_start_times_[thread_id].size();
1943
1944   for (size_t i = 0; i < depth; ++i)
1945     log << "| ";
1946
1947   if (trace_event)
1948     trace_event->AppendPrettyPrinted(&log);
1949   if (phase == TRACE_EVENT_PHASE_END)
1950     log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());
1951
1952   log << "\x1b[0;m";
1953
1954   if (phase == TRACE_EVENT_PHASE_BEGIN)
1955     thread_event_start_times_[thread_id].push(timestamp);
1956
1957   return log.str();
1958 }
1959
1960 void TraceLog::AddTraceEventEtw(char phase,
1961                                 const char* name,
1962                                 const void* id,
1963                                 const char* extra) {
1964 #if defined(OS_WIN)
1965   TraceEventETWProvider::Trace(name, phase, id, extra);
1966 #endif
1967   INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
1968                            TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
1969 }
1970
1971 void TraceLog::AddTraceEventEtw(char phase,
1972                                 const char* name,
1973                                 const void* id,
1974                                 const std::string& extra) {
1975 #if defined(OS_WIN)
1976   TraceEventETWProvider::Trace(name, phase, id, extra);
1977 #endif
1978   INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
1979                            TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
1980 }
1981
1982 void TraceLog::UpdateTraceEventDuration(
1983     const unsigned char* category_group_enabled,
1984     const char* name,
1985     TraceEventHandle handle) {
1986   // Avoid re-entrance of AddTraceEvent. This may happen in the GPU process when
1987   // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
1988   // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
1989   if (thread_is_in_trace_event_.Get())
1990     return;
1991
1992   AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);
1993
1994   TimeTicks thread_now = ThreadNow();
1995   TimeTicks now = OffsetNow();
1996
1997   std::string console_message;
1998   if (*category_group_enabled & ENABLED_FOR_RECORDING) {
1999     OptionalAutoLock lock(lock_);
2000
2001     TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock);
2002     if (trace_event) {
2003       DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE);
2004       trace_event->UpdateDuration(now, thread_now);
2005 #if defined(OS_ANDROID)
2006       trace_event->SendToATrace();
2007 #endif
2008     }
2009
2010     if (trace_options() & ECHO_TO_CONSOLE) {
2011       console_message = EventToConsoleMessage(TRACE_EVENT_PHASE_END,
2012                                               now, trace_event);
2013     }
2014   }
2015
2016   if (console_message.size())
2017     LOG(ERROR) << console_message;
2018
2019   if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
2020     EventCallback event_callback = reinterpret_cast<EventCallback>(
2021         subtle::NoBarrier_Load(&event_callback_));
2022     if (event_callback) {
2023       event_callback(now, TRACE_EVENT_PHASE_END, category_group_enabled, name,
2024                      trace_event_internal::kNoEventId, 0, NULL, NULL, NULL,
2025                      TRACE_EVENT_FLAG_NONE);
2026     }
2027   }
2028 }
2029
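// Sketch of intended use (the category and event names are made up):
//   TraceLog::GetInstance()->SetWatchEvent(
//       "cat", "interesting_event", base::Bind(&OnEventSeen));
// The callback is run from AddTraceEvent whenever an event with a matching
// category and name is recorded, until CancelWatchEvent() is called.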
2030 void TraceLog::SetWatchEvent(const std::string& category_name,
2031                              const std::string& event_name,
2032                              const WatchEventCallback& callback) {
2033   const unsigned char* category = GetCategoryGroupEnabled(
2034       category_name.c_str());
2035   AutoLock lock(lock_);
2036   subtle::NoBarrier_Store(&watch_category_,
2037                           reinterpret_cast<subtle::AtomicWord>(category));
2038   watch_event_name_ = event_name;
2039   watch_event_callback_ = callback;
2040 }
2041
2042 void TraceLog::CancelWatchEvent() {
2043   AutoLock lock(lock_);
2044   subtle::NoBarrier_Store(&watch_category_, 0);
2045   watch_event_name_ = "";
2046   watch_event_callback_.Reset();
2047 }
2048
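// Appends the bookkeeping events (process name, labels and sort index; thread
// names and sort indices) that trace consumers use to label and order
// process/thread rows. These go straight into the shared chunk rather than
// through the normal AddTraceEvent() path.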
2049 void TraceLog::AddMetadataEventsWhileLocked() {
2050   lock_.AssertAcquired();
2051
2052   int current_thread_id = static_cast<int>(base::PlatformThread::CurrentId());
2053   if (process_sort_index_ != 0) {
2054     InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
2055                             current_thread_id,
2056                             "process_sort_index", "sort_index",
2057                             process_sort_index_);
2058   }
2059
2060   if (process_name_.size()) {
2061     InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
2062                             current_thread_id,
2063                             "process_name", "name",
2064                             process_name_);
2065   }
2066
2067   if (!process_labels_.empty()) {
2068     std::vector<std::string> labels;
2069     for (base::hash_map<int, std::string>::iterator it =
2070              process_labels_.begin();
2071          it != process_labels_.end(); ++it) {
2072       labels.push_back(it->second);
2073     }
2074     InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
2075                             current_thread_id,
2076                             "process_labels", "labels",
2077                             JoinString(labels, ','));
2078   }
2079
2080   // Thread sort indices.
2081   for (hash_map<int, int>::iterator it = thread_sort_indices_.begin();
2082        it != thread_sort_indices_.end();
2083        ++it) {
2084     if (it->second == 0)
2085       continue;
2086     InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
2087                             it->first,
2088                             "thread_sort_index", "sort_index",
2089                             it->second);
2090   }
2091
2092   // Thread names.
2093   AutoLock thread_info_lock(thread_info_lock_);
2094   for (hash_map<int, std::string>::iterator it = thread_names_.begin();
2095        it != thread_names_.end();
2096        ++it) {
2097     if (it->second.empty())
2098       continue;
2099     InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
2100                             it->first,
2101                             "thread_name", "name",
2102                             it->second);
2103   }
2104 }
2105
2106 void TraceLog::WaitSamplingEventForTesting() {
2107   if (!sampling_thread_)
2108     return;
2109   sampling_thread_->WaitSamplingEventForTesting();
2110 }
2111
2112 void TraceLog::DeleteForTesting() {
2113   DeleteTraceLogForTesting::Delete();
2114 }
2115
2116 TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
2117   return GetEventByHandleInternal(handle, NULL);
2118 }
2119
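// Lookup order for a handle: the calling thread's local buffer first (no lock
// needed), then the thread-shared chunk, then the main buffer. The chunk_seq
// comparison guards against a chunk that has since been recycled and no
// longer holds the requested event.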
2120 TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle,
2121                                                OptionalAutoLock* lock) {
2122   if (!handle.chunk_seq)
2123     return NULL;
2124
2125   if (thread_local_event_buffer_.Get()) {
2126     TraceEvent* trace_event =
2127         thread_local_event_buffer_.Get()->GetEventByHandle(handle);
2128     if (trace_event)
2129       return trace_event;
2130   }
2131
2132   // The event is no longer under the control of the thread local buffer.
2133   // Try to get it from the main buffer, taking the lock if one was supplied.
2134   if (lock)
2135     lock->EnsureAcquired();
2136
2137   if (thread_shared_chunk_ &&
2138       handle.chunk_index == thread_shared_chunk_index_) {
2139     return handle.chunk_seq == thread_shared_chunk_->seq() ?
2140         thread_shared_chunk_->GetEventAt(handle.event_index) : NULL;
2141   }
2142
2143   return logged_events_->GetEventByHandle(handle);
2144 }
2145
2146 void TraceLog::SetProcessID(int process_id) {
2147   process_id_ = process_id;
2148   // Create a FNV hash from the process ID for XORing.
2149   // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
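  // The resulting process_id_hash_ is XOR'ed into the ids of events carrying
  // TRACE_EVENT_FLAG_MANGLE_ID (see AddTraceEventWithThreadIdAndTimestamp) so
  // that ids emitted by different processes are unlikely to collide.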
2150   unsigned long long offset_basis = 14695981039346656037ull;
2151   unsigned long long fnv_prime = 1099511628211ull;
2152   unsigned long long pid = static_cast<unsigned long long>(process_id_);
2153   process_id_hash_ = (offset_basis ^ pid) * fnv_prime;
2154 }
2155
2156 void TraceLog::SetProcessSortIndex(int sort_index) {
2157   AutoLock lock(lock_);
2158   process_sort_index_ = sort_index;
2159 }
2160
2161 void TraceLog::SetProcessName(const std::string& process_name) {
2162   AutoLock lock(lock_);
2163   process_name_ = process_name;
2164 }
2165
2166 void TraceLog::UpdateProcessLabel(
2167     int label_id, const std::string& current_label) {
2168   if (current_label.empty())
2169     return RemoveProcessLabel(label_id);
2170
2171   AutoLock lock(lock_);
2172   process_labels_[label_id] = current_label;
2173 }
2174
2175 void TraceLog::RemoveProcessLabel(int label_id) {
2176   AutoLock lock(lock_);
2177   base::hash_map<int, std::string>::iterator it = process_labels_.find(
2178       label_id);
2179   if (it == process_labels_.end())
2180     return;
2181
2182   process_labels_.erase(it);
2183 }
2184
2185 void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) {
2186   AutoLock lock(lock_);
2187   thread_sort_indices_[static_cast<int>(thread_id)] = sort_index;
2188 }
2189
2190 void TraceLog::SetTimeOffset(TimeDelta offset) {
2191   time_offset_ = offset;
2192 }
2193
2194 size_t TraceLog::GetObserverCountForTest() const {
2195   return enabled_state_observer_list_.size();
2196 }
2197
2198 void TraceLog::SetCurrentThreadBlocksMessageLoop() {
2199   thread_blocks_message_loop_.Set(true);
2200   if (thread_local_event_buffer_.Get()) {
2201     // This will flush the thread local buffer.
2202     delete thread_local_event_buffer_.Get();
2203   }
2204 }
2205
2206 bool CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
2207     const std::string& str) {
2208   return str.empty() ||
2209          str.at(0) == ' ' ||
2210          str.at(str.length() - 1) == ' ';
2211 }
2212
2213 bool CategoryFilter::DoesCategoryGroupContainCategory(
2214     const char* category_group,
2215     const char* category) const {
2216   DCHECK(category);
2217   CStringTokenizer category_group_tokens(
2218       category_group, category_group + strlen(category_group), ",");
2219   while (category_group_tokens.GetNext()) {
2220     std::string category_group_token = category_group_tokens.token();
2221     // Don't allow empty tokens, nor tokens with leading or trailing space.
2222     DCHECK(!CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
2223         category_group_token))
2224         << "Disallowed category string";
2225     if (MatchPattern(category_group_token.c_str(), category))
2226       return true;
2227   }
2228   return false;
2229 }
2230
2231 CategoryFilter::CategoryFilter(const std::string& filter_string) {
2232   if (!filter_string.empty())
2233     Initialize(filter_string);
2234   else
2235     Initialize(CategoryFilter::kDefaultCategoryFilterString);
2236 }
2237
2238 CategoryFilter::CategoryFilter(const CategoryFilter& cf)
2239     : included_(cf.included_),
2240       disabled_(cf.disabled_),
2241       excluded_(cf.excluded_),
2242       delays_(cf.delays_) {
2243 }
2244
2245 CategoryFilter::~CategoryFilter() {
2246 }
2247
2248 CategoryFilter& CategoryFilter::operator=(const CategoryFilter& rhs) {
2249   if (this == &rhs)
2250     return *this;
2251
2252   included_ = rhs.included_;
2253   disabled_ = rhs.disabled_;
2254   excluded_ = rhs.excluded_;
2255   delays_ = rhs.delays_;
2256   return *this;
2257 }
2258
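// Accepted forms, by example (the category names here are illustrative):
//   "cat1,cat2"                 include only cat1 and cat2;
//   "-cat1"                     include everything except cat1;
//   "disabled-by-default-cat1"  opt into a disabled-by-default category;
//   "DELAY(cat1_delay;16)"      register a synthetic delay.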
2259 void CategoryFilter::Initialize(const std::string& filter_string) {
2260   // Tokenize list of categories, delimited by ','.
2261   StringTokenizer tokens(filter_string, ",");
2262   // Add each token to the appropriate list (included_,excluded_).
2263   while (tokens.GetNext()) {
2264     std::string category = tokens.token();
2265     // Ignore empty categories.
2266     if (category.empty())
2267       continue;
2268     // Synthetic delays are of the form 'DELAY(delay;option;option;...)'.
2269     if (category.find(kSyntheticDelayCategoryFilterPrefix) == 0 &&
2270         category.at(category.size() - 1) == ')') {
2271       category = category.substr(
2272           strlen(kSyntheticDelayCategoryFilterPrefix),
2273           category.size() - strlen(kSyntheticDelayCategoryFilterPrefix) - 1);
2274       size_t name_length = category.find(';');
2275       if (name_length != std::string::npos && name_length > 0 &&
2276           name_length != category.size() - 1) {
2277         delays_.push_back(category);
2278       }
2279     } else if (category.at(0) == '-') {
2280       // Excluded categories start with '-'.
2281       // Remove '-' from category string.
2282       category = category.substr(1);
2283       excluded_.push_back(category);
2284     } else if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
2285                                 TRACE_DISABLED_BY_DEFAULT("")) == 0) {
2286       disabled_.push_back(category);
2287     } else {
2288       included_.push_back(category);
2289     }
2290   }
2291 }
2292
2293 void CategoryFilter::WriteString(const StringList& values,
2294                                  std::string* out,
2295                                  bool included) const {
2296   bool prepend_comma = !out->empty();
2297   int token_cnt = 0;
2298   for (StringList::const_iterator ci = values.begin();
2299        ci != values.end(); ++ci) {
2300     if (token_cnt > 0 || prepend_comma)
2301       StringAppendF(out, ",");
2302     StringAppendF(out, "%s%s", (included ? "" : "-"), ci->c_str());
2303     ++token_cnt;
2304   }
2305 }
2306
2307 void CategoryFilter::WriteString(const StringList& delays,
2308                                  std::string* out) const {
2309   bool prepend_comma = !out->empty();
2310   int token_cnt = 0;
2311   for (StringList::const_iterator ci = delays.begin();
2312        ci != delays.end(); ++ci) {
2313     if (token_cnt > 0 || prepend_comma)
2314       StringAppendF(out, ",");
2315     StringAppendF(out, "%s%s)", kSyntheticDelayCategoryFilterPrefix,
2316                   ci->c_str());
2317     ++token_cnt;
2318   }
2319 }
2320
2321 std::string CategoryFilter::ToString() const {
2322   std::string filter_string;
2323   WriteString(included_, &filter_string, true);
2324   WriteString(disabled_, &filter_string, true);
2325   WriteString(excluded_, &filter_string, false);
2326   WriteString(delays_, &filter_string);
2327   return filter_string;
2328 }
2329
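// Worked example (illustrative): with the filter "-ipc", the category group
// "ipc,toplevel" is disabled because one of its categories matches an
// excluded pattern, while "toplevel" alone stays enabled since there are no
// included patterns.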
2330 bool CategoryFilter::IsCategoryGroupEnabled(
2331     const char* category_group_name) const {
2332   // TraceLog should call this method only as part of enabling/disabling
2333   // categories.
2334   StringList::const_iterator ci;
2335
2336   // Check the disabled- filters and the disabled-* wildcard first so that a
2337   // "*" filter does not include the disabled.
2338   for (ci = disabled_.begin(); ci != disabled_.end(); ++ci) {
2339     if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
2340       return true;
2341   }
2342   if (DoesCategoryGroupContainCategory(category_group_name,
2343                                        TRACE_DISABLED_BY_DEFAULT("*")))
2344     return false;
2345
2346   for (ci = included_.begin(); ci != included_.end(); ++ci) {
2347     if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
2348       return true;
2349   }
2350
2351   for (ci = excluded_.begin(); ci != excluded_.end(); ++ci) {
2352     if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
2353       return false;
2354   }
2355   // If the category group is not excluded, and there are no included patterns,
2356   // we consider this category group enabled.
2357   return included_.empty();
2358 }
2359
2360 bool CategoryFilter::HasIncludedPatterns() const {
2361   return !included_.empty();
2362 }
2363
2364 void CategoryFilter::Merge(const CategoryFilter& nested_filter) {
2365   // Keep included patterns only if both filters have an included entry.
2366   // Otherwise, one of the filters was specifying "*" and we want to honour the
2367   // broadest filter.
2368   if (HasIncludedPatterns() && nested_filter.HasIncludedPatterns()) {
2369     included_.insert(included_.end(),
2370                      nested_filter.included_.begin(),
2371                      nested_filter.included_.end());
2372   } else {
2373     included_.clear();
2374   }
2375
2376   disabled_.insert(disabled_.end(),
2377                    nested_filter.disabled_.begin(),
2378                    nested_filter.disabled_.end());
2379   excluded_.insert(excluded_.end(),
2380                    nested_filter.excluded_.begin(),
2381                    nested_filter.excluded_.end());
2382   delays_.insert(delays_.end(),
2383                  nested_filter.delays_.begin(),
2384                  nested_filter.delays_.end());
2385 }
2386
2387 void CategoryFilter::Clear() {
2388   included_.clear();
2389   disabled_.clear();
2390   excluded_.clear();
2391 }
2392
2393 const CategoryFilter::StringList&
2394     CategoryFilter::GetSyntheticDelayValues() const {
2395   return delays_;
2396 }
2397
2398 }  // namespace debug
2399 }  // namespace base
2400
2401 namespace trace_event_internal {
2402
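// RAII helper for very low overhead tracing: the constructor records the
// start of a COMPLETE event and the destructor fills in its duration. It is
// normally reached through a trace macro rather than instantiated directly,
// and for now only supports the "gpu" category group (see the DCHECK below).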
2403 ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient(
2404     const char* category_group, const char* name) {
2405   // The single atom works because, for now, the category_group can only be "gpu".
2406   DCHECK(strcmp(category_group, "gpu") == 0);
2407   static TRACE_EVENT_API_ATOMIC_WORD atomic = 0;
2408   INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(
2409       category_group, atomic, category_group_enabled_);
2410   name_ = name;
2411   if (*category_group_enabled_) {
2412     event_handle_ =
2413         TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
2414             TRACE_EVENT_PHASE_COMPLETE, category_group_enabled_, name,
2415             trace_event_internal::kNoEventId,
2416             static_cast<int>(base::PlatformThread::CurrentId()),
2417             base::TimeTicks::NowFromSystemTraceTime(),
2418             0, NULL, NULL, NULL, NULL, TRACE_EVENT_FLAG_NONE);
2419   }
2420 }
2421
2422 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
2423   if (*category_group_enabled_) {
2424     TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_,
2425                                                 name_, event_handle_);
2426   }
2427 }
2428
2429 }  // namespace trace_event_internal