// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/debug/trace_event_impl.h"

#include "base/base_switches.h"
#include "base/bind.h"
#include "base/command_line.h"
#include "base/debug/leak_annotations.h"
#include "base/debug/trace_event.h"
#include "base/debug/trace_event_synthetic_delay.h"
#include "base/float_util.h"
#include "base/format_macros.h"
#include "base/json/string_escape.h"
#include "base/lazy_instance.h"
#include "base/memory/singleton.h"
#include "base/message_loop/message_loop.h"
#include "base/process/process_metrics.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/string_tokenizer.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "base/synchronization/cancellation_flag.h"
#include "base/synchronization/waitable_event.h"
#include "base/sys_info.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_id_name_manager.h"
#include "base/time/time.h"

#include "base/debug/trace_event_win.h"

class DeleteTraceLogForTesting {
  static void Delete() {
    Singleton<base::debug::TraceLog,
              LeakySingletonTraits<base::debug::TraceLog> >::OnExit(0);
// The thread buckets for the sampling profiler.
BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];

// The overhead of TraceEvent above this threshold will be reported in the
// trace.
const int kOverheadReportThresholdInMicroseconds = 50;

// Controls the number of trace events we will buffer in-memory
// before throwing them away.
const size_t kTraceBufferChunkSize = TraceBufferChunk::kTraceBufferChunkSize;
const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize;
const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4;
const size_t kTraceEventBatchChunks = 1000 / kTraceBufferChunkSize;
// Can store results for 30 seconds with 1 ms sampling interval.
const size_t kMonitorTraceEventBufferChunks = 30000 / kTraceBufferChunkSize;
// ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
const size_t kEchoToConsoleTraceEventBufferChunks = 256;
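// Worked sizing example (illustrative sketch, assuming TraceBufferChunk
// defines kTraceBufferChunkSize as 64):
//   kTraceEventVectorBufferChunks  = 256000 / 64 = 4000 chunks (~256000 events)
//   kTraceEventRingBufferChunks    = 4000 / 4    = 1000 chunks
//   kMonitorTraceEventBufferChunks = 30000 / 64  = 468 chunks (~30000 events,
//                                    i.e. 30 seconds at a 1 ms sampling rate)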
const int kThreadFlushTimeoutMs = 3000;

// These categories will cause deadlock when ECHO_TO_CONSOLE. crbug.com/325575.
const char kEchoToConsoleCategoryFilter[] = "-ipc,-task";

const char kSyntheticDelayCategoryFilterPrefix[] = "DELAY(";

#define MAX_CATEGORY_GROUPS 100

// Parallel arrays g_category_groups and g_category_group_enabled are separate
// so that a pointer to a member of g_category_group_enabled can be easily
// converted to an index into g_category_groups. This allows macros to deal
// only with char enabled pointers from g_category_group_enabled, and we can
// convert internally to determine the category name from the char enabled
// pointer.
const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
  "toplevel",
  "tracing already shutdown",
  "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
  "__metadata",
  // For reporting trace_event overhead. For thread local event buffers only.
  "trace_event_overhead"};

// The enabled flag is char instead of bool so that the API can be used from C.
unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = { 0 };
// Indexes here have to match the g_category_groups array indexes above.
const int g_category_already_shutdown = 1;
const int g_category_categories_exhausted = 2;
const int g_category_metadata = 3;
const int g_category_trace_event_overhead = 4;
const int g_num_builtin_categories = 5;
int g_category_index = g_num_builtin_categories;  // Skip default categories.
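// Sketch of the pointer-to-index conversion this parallel-array layout
// enables (see GetCategoryGroupName() below for the real implementation):
//   const unsigned char* enabled = &g_category_group_enabled[i];
//   size_t index = enabled - g_category_group_enabled;
//   const char* name = g_category_groups[index];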
// The name of the current thread. This is used to decide if the current
// thread name has changed. We combine all the seen thread names into the
// output name for the thread.
LazyInstance<ThreadLocalPointer<const char> >::Leaky
    g_current_thread_name = LAZY_INSTANCE_INITIALIZER;

TimeTicks ThreadNow() {
  return TimeTicks::IsThreadNowSupported() ?
      TimeTicks::ThreadNow() : TimeTicks();

class TraceBufferRingBuffer : public TraceBuffer {
  explicit TraceBufferRingBuffer(size_t max_chunks)
      : max_chunks_(max_chunks),
        recyclable_chunks_queue_(new size_t[queue_capacity()]),
        queue_tail_(max_chunks),
        current_iteration_index_(0),
        current_chunk_seq_(1) {
    chunks_.reserve(max_chunks);
    for (size_t i = 0; i < max_chunks; ++i)
      recyclable_chunks_queue_[i] = i;

  virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
    // Because the number of threads is much less than the number of chunks,
    // the queue should never be empty.
    DCHECK(!QueueIsEmpty());

    *index = recyclable_chunks_queue_[queue_head_];
    queue_head_ = NextQueueIndex(queue_head_);
    current_iteration_index_ = queue_head_;

    if (*index >= chunks_.size())
      chunks_.resize(*index + 1);

    TraceBufferChunk* chunk = chunks_[*index];
    chunks_[*index] = NULL;  // Put NULL in the slot of an in-flight chunk.
      chunk->Reset(current_chunk_seq_++);
      chunk = new TraceBufferChunk(current_chunk_seq_++);

    return scoped_ptr<TraceBufferChunk>(chunk);

  virtual void ReturnChunk(size_t index,
                           scoped_ptr<TraceBufferChunk> chunk) OVERRIDE {
    // When this method is called, the queue should not be full because it
    // can contain all chunks including the one to be returned.
    DCHECK(!QueueIsFull());

    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    chunks_[index] = chunk.release();
    recyclable_chunks_queue_[queue_tail_] = index;
    queue_tail_ = NextQueueIndex(queue_tail_);

  virtual bool IsFull() const OVERRIDE {

  virtual size_t Size() const OVERRIDE {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * kTraceBufferChunkSize;

  virtual size_t Capacity() const OVERRIDE {
    return max_chunks_ * kTraceBufferChunkSize;

  virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
    if (handle.chunk_index >= chunks_.size())
    TraceBufferChunk* chunk = chunks_[handle.chunk_index];
    if (!chunk || chunk->seq() != handle.chunk_seq)
    return chunk->GetEventAt(handle.event_index);

  virtual const TraceBufferChunk* NextChunk() OVERRIDE {
    while (current_iteration_index_ != queue_tail_) {
      size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_];
      current_iteration_index_ = NextQueueIndex(current_iteration_index_);
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
      DCHECK(chunks_[chunk_index]);
      return chunks_[chunk_index];

  virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
    scoped_ptr<ClonedTraceBuffer> cloned_buffer(new ClonedTraceBuffer());
    for (size_t queue_index = queue_head_; queue_index != queue_tail_;
         queue_index = NextQueueIndex(queue_index)) {
      size_t chunk_index = recyclable_chunks_queue_[queue_index];
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
      TraceBufferChunk* chunk = chunks_[chunk_index];
      cloned_buffer->chunks_.push_back(chunk ? chunk->Clone().release() : NULL);
    return cloned_buffer.PassAs<TraceBuffer>();
  class ClonedTraceBuffer : public TraceBuffer {
    ClonedTraceBuffer() : current_iteration_index_(0) {}

    // The only implemented method.
    virtual const TraceBufferChunk* NextChunk() OVERRIDE {
      return current_iteration_index_ < chunks_.size() ?
          chunks_[current_iteration_index_++] : NULL;

    virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
      return scoped_ptr<TraceBufferChunk>();
    virtual void ReturnChunk(size_t index,
                             scoped_ptr<TraceBufferChunk>) OVERRIDE {
    virtual bool IsFull() const OVERRIDE { return false; }
    virtual size_t Size() const OVERRIDE { return 0; }
    virtual size_t Capacity() const OVERRIDE { return 0; }
    virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
    virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
      return scoped_ptr<TraceBuffer>();

    size_t current_iteration_index_;
    ScopedVector<TraceBufferChunk> chunks_;

  bool QueueIsEmpty() const {
    return queue_head_ == queue_tail_;

  size_t QueueSize() const {
    return queue_tail_ > queue_head_ ? queue_tail_ - queue_head_ :
        queue_tail_ + queue_capacity() - queue_head_;

  bool QueueIsFull() const {
    return QueueSize() == queue_capacity() - 1;

  size_t queue_capacity() const {
    // One extra space to help distinguish full state and empty state.
    return max_chunks_ + 1;

  size_t NextQueueIndex(size_t index) const {
    if (index >= queue_capacity())

  ScopedVector<TraceBufferChunk> chunks_;
  scoped_ptr<size_t[]> recyclable_chunks_queue_;
  size_t current_iteration_index_;
  uint32 current_chunk_seq_;

  DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer);
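// Illustration of the ring-queue invariant above (a sketch, not part of the
// original file): with max_chunks_ == 2, queue_capacity() == 3, so
// head == tail means the queue is empty while QueueSize() == 2 (capacity - 1)
// means it is full; the one spare slot is what lets the two states be
// distinguished.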
class TraceBufferVector : public TraceBuffer {
    : in_flight_chunk_count_(0),
      current_iteration_index_(0) {
    chunks_.reserve(kTraceEventVectorBufferChunks);

  virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
    // This function may be called when adding normal events or indirectly from
    // AddMetadataEventsWhileLocked(). We cannot DCHECK(!IsFull()) because we
    // have to add the metadata events and flush thread-local buffers even if
    // the buffer is full.
    *index = chunks_.size();
    chunks_.push_back(NULL);  // Put NULL in the slot of an in-flight chunk.
    ++in_flight_chunk_count_;
    // + 1 because zero chunk_seq is not allowed.
    return scoped_ptr<TraceBufferChunk>(
        new TraceBufferChunk(static_cast<uint32>(*index) + 1));

  virtual void ReturnChunk(size_t index,
                           scoped_ptr<TraceBufferChunk> chunk) OVERRIDE {
    DCHECK_GT(in_flight_chunk_count_, 0u);
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    --in_flight_chunk_count_;
    chunks_[index] = chunk.release();

  virtual bool IsFull() const OVERRIDE {
    return chunks_.size() >= kTraceEventVectorBufferChunks;

  virtual size_t Size() const OVERRIDE {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * kTraceBufferChunkSize;

  virtual size_t Capacity() const OVERRIDE {
    return kTraceEventVectorBufferChunks * kTraceBufferChunkSize;

  virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
    if (handle.chunk_index >= chunks_.size())
    TraceBufferChunk* chunk = chunks_[handle.chunk_index];
    if (!chunk || chunk->seq() != handle.chunk_seq)
    return chunk->GetEventAt(handle.event_index);

  virtual const TraceBufferChunk* NextChunk() OVERRIDE {
    while (current_iteration_index_ < chunks_.size()) {
      // Skip in-flight chunks.
      const TraceBufferChunk* chunk = chunks_[current_iteration_index_++];

  virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
    return scoped_ptr<TraceBuffer>();

  size_t in_flight_chunk_count_;
  size_t current_iteration_index_;
  ScopedVector<TraceBufferChunk> chunks_;

  DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
template <typename T>
void InitializeMetadataEvent(TraceEvent* trace_event,
                             const char* metadata_name, const char* arg_name,

  unsigned char arg_type;
  unsigned long long arg_value;
  ::trace_event_internal::SetTraceValue(value, &arg_type, &arg_value);
  trace_event->Initialize(thread_id,
                          TimeTicks(), TimeTicks(), TRACE_EVENT_PHASE_METADATA,
                          &g_category_group_enabled[g_category_metadata],
                          metadata_name, ::trace_event_internal::kNoEventId,
                          num_args, &arg_name, &arg_type, &arg_value, NULL,
                          TRACE_EVENT_FLAG_NONE);

class AutoThreadLocalBoolean {
  explicit AutoThreadLocalBoolean(ThreadLocalBoolean* thread_local_boolean)
      : thread_local_boolean_(thread_local_boolean) {
    DCHECK(!thread_local_boolean_->Get());
    thread_local_boolean_->Set(true);
  ~AutoThreadLocalBoolean() {
    thread_local_boolean_->Set(false);

  ThreadLocalBoolean* thread_local_boolean_;
  DISALLOW_COPY_AND_ASSIGN(AutoThreadLocalBoolean);
void TraceBufferChunk::Reset(uint32 new_seq) {
  for (size_t i = 0; i < next_free_; ++i)

TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) {
  *event_index = next_free_++;
  return &chunk_[*event_index];

scoped_ptr<TraceBufferChunk> TraceBufferChunk::Clone() const {
  scoped_ptr<TraceBufferChunk> cloned_chunk(new TraceBufferChunk(seq_));
  cloned_chunk->next_free_ = next_free_;
  for (size_t i = 0; i < next_free_; ++i)
    cloned_chunk->chunk_[i].CopyFrom(chunk_[i]);
  return cloned_chunk.Pass();

// A helper class that allows the lock to be acquired in the middle of the
// scope and unlocks at the end of the scope if locked.
class TraceLog::OptionalAutoLock {
  explicit OptionalAutoLock(Lock& lock)

  ~OptionalAutoLock() {

  void EnsureAcquired() {

  DISALLOW_COPY_AND_ASSIGN(OptionalAutoLock);
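// Usage sketch (illustrative; the condition is hypothetical): callers
// construct an OptionalAutoLock up front and acquire the lock only on the
// path that touches shared state, e.g.
//   OptionalAutoLock lock(lock_);
//   if (!thread_local_event_buffer)  // hypothetical condition
//     lock.EnsureAcquired();
// The destructor unlocks only if EnsureAcquired() actually ran.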
// Use this function instead of TraceEventHandle constructor to keep the
// overhead of ScopedTracer (trace_event.h) constructor to a minimum.
void MakeHandle(uint32 chunk_seq, size_t chunk_index, size_t event_index,
                TraceEventHandle* handle) {
  DCHECK(chunk_index < (1u << 16));
  DCHECK(event_index < (1u << 16));
  handle->chunk_seq = chunk_seq;
  handle->chunk_index = static_cast<uint16>(chunk_index);
  handle->event_index = static_cast<uint16>(event_index);
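// Packing example (illustrative): MakeHandle(7, 2, 5, &handle) yields
// handle.chunk_seq == 7, handle.chunk_index == 2 and handle.event_index == 5;
// the DCHECKs above guarantee both indices fit into the uint16 fields.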
////////////////////////////////////////////////////////////////////////////////
//
// TraceEvent
//
////////////////////////////////////////////////////////////////////////////////

size_t GetAllocLength(const char* str) { return str ? strlen(str) + 1 : 0; }

// Copies |*member| into |*buffer|, sets |*member| to point to this new
// location, and then advances |*buffer| by the amount written.
void CopyTraceEventParameter(char** buffer,
  size_t written = strlcpy(*buffer, *member, end - *buffer) + 1;
  DCHECK_LE(static_cast<int>(written), end - *buffer);

TraceEvent::TraceEvent()
    : duration_(TimeDelta::FromInternalValue(-1)),
      category_group_enabled_(NULL),
      phase_(TRACE_EVENT_PHASE_BEGIN),
  for (int i = 0; i < kTraceMaxNumArgs; ++i)
    arg_names_[i] = NULL;
  memset(arg_values_, 0, sizeof(arg_values_));

TraceEvent::~TraceEvent() {

void TraceEvent::CopyFrom(const TraceEvent& other) {
  timestamp_ = other.timestamp_;
  thread_timestamp_ = other.thread_timestamp_;
  duration_ = other.duration_;
  category_group_enabled_ = other.category_group_enabled_;
  thread_id_ = other.thread_id_;
  phase_ = other.phase_;
  flags_ = other.flags_;
  parameter_copy_storage_ = other.parameter_copy_storage_;

  for (int i = 0; i < kTraceMaxNumArgs; ++i) {
    arg_names_[i] = other.arg_names_[i];
    arg_types_[i] = other.arg_types_[i];
    arg_values_[i] = other.arg_values_[i];
    convertable_values_[i] = other.convertable_values_[i];
void TraceEvent::Initialize(
    TimeTicks thread_timestamp,
    const unsigned char* category_group_enabled,
    unsigned long long id,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned char flags) {
  timestamp_ = timestamp;
  thread_timestamp_ = thread_timestamp;
  duration_ = TimeDelta::FromInternalValue(-1);
  category_group_enabled_ = category_group_enabled;
  thread_id_ = thread_id;

  // Clamp num_args since it may have been set by a third_party library.
  num_args = (num_args > kTraceMaxNumArgs) ? kTraceMaxNumArgs : num_args;
  for (; i < num_args; ++i) {
    arg_names_[i] = arg_names[i];
    arg_types_[i] = arg_types[i];

    if (arg_types[i] == TRACE_VALUE_TYPE_CONVERTABLE)
      convertable_values_[i] = convertable_values[i];
      arg_values_[i].as_uint = arg_values[i];
  for (; i < kTraceMaxNumArgs; ++i) {
    arg_names_[i] = NULL;
    arg_values_[i].as_uint = 0u;
    convertable_values_[i] = NULL;
    arg_types_[i] = TRACE_VALUE_TYPE_UINT;

  bool copy = !!(flags & TRACE_EVENT_FLAG_COPY);
  size_t alloc_size = 0;
    alloc_size += GetAllocLength(name);
    for (i = 0; i < num_args; ++i) {
      alloc_size += GetAllocLength(arg_names_[i]);
      if (arg_types_[i] == TRACE_VALUE_TYPE_STRING)
        arg_types_[i] = TRACE_VALUE_TYPE_COPY_STRING;

  bool arg_is_copy[kTraceMaxNumArgs];
  for (i = 0; i < num_args; ++i) {
    // No copying of convertable types, we retain ownership.
    if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)

    // We only take a copy of arg_vals if they are of type COPY_STRING.
    arg_is_copy[i] = (arg_types_[i] == TRACE_VALUE_TYPE_COPY_STRING);
      alloc_size += GetAllocLength(arg_values_[i].as_string);

    parameter_copy_storage_ = new RefCountedString;
    parameter_copy_storage_->data().resize(alloc_size);
    char* ptr = string_as_array(&parameter_copy_storage_->data());
    const char* end = ptr + alloc_size;
      CopyTraceEventParameter(&ptr, &name_, end);
    for (i = 0; i < num_args; ++i) {
      CopyTraceEventParameter(&ptr, &arg_names_[i], end);
    for (i = 0; i < num_args; ++i) {
      if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
        CopyTraceEventParameter(&ptr, &arg_values_[i].as_string, end);
    DCHECK_EQ(end, ptr) << "Overrun by " << ptr - end;
void TraceEvent::Reset() {
  // Only reset fields that won't be initialized in Initialize(), or that may
  // hold references to other objects.
  duration_ = TimeDelta::FromInternalValue(-1);
  parameter_copy_storage_ = NULL;
  for (int i = 0; i < kTraceMaxNumArgs; ++i)
    convertable_values_[i] = NULL;

void TraceEvent::UpdateDuration(const TimeTicks& now,
                                const TimeTicks& thread_now) {
  DCHECK(duration_.ToInternalValue() == -1);
  duration_ = now - timestamp_;
  thread_duration_ = thread_now - thread_timestamp_;

void TraceEvent::AppendValueAsJSON(unsigned char type,
                                   TraceEvent::TraceValue value,
    case TRACE_VALUE_TYPE_BOOL:
      *out += value.as_bool ? "true" : "false";
    case TRACE_VALUE_TYPE_UINT:
      StringAppendF(out, "%" PRIu64, static_cast<uint64>(value.as_uint));
    case TRACE_VALUE_TYPE_INT:
      StringAppendF(out, "%" PRId64, static_cast<int64>(value.as_int));
    case TRACE_VALUE_TYPE_DOUBLE: {
      // FIXME: base/json/json_writer.cc is using the same code,
      // should be made into a common method.
      double val = value.as_double;
        real = DoubleToString(val);
        // Ensure that the number has a .0 if there's no decimal or 'e'. This
        // makes sure that when we read the JSON back, it's interpreted as a
        // real rather than an int.
        if (real.find('.') == std::string::npos &&
            real.find('e') == std::string::npos &&
            real.find('E') == std::string::npos) {

        // The JSON spec requires that non-integer values in the range (-1,1)
        // have a zero before the decimal point - ".52" is not valid, "0.52" is.
        if (real[0] == '.') {
        } else if (real.length() > 1 && real[0] == '-' && real[1] == '.') {
          // "-.1" bad "-0.1" good
      } else if (IsNaN(val)) {
        // The JSON spec doesn't allow NaN and Infinity (since these are
        // objects in EcmaScript). Use strings instead.
      } else if (val < 0) {
        real = "\"-Infinity\"";
        real = "\"Infinity\"";
      StringAppendF(out, "%s", real.c_str());
    case TRACE_VALUE_TYPE_POINTER:
      // JSON only supports double and int numbers.
      // So as not to lose bits from a 64-bit pointer, output as a hex string.
      StringAppendF(out, "\"0x%" PRIx64 "\"", static_cast<uint64>(
                        reinterpret_cast<intptr_t>(
    case TRACE_VALUE_TYPE_STRING:
    case TRACE_VALUE_TYPE_COPY_STRING:
      EscapeJSONString(value.as_string ? value.as_string : "NULL", true, out);
      NOTREACHED() << "Don't know how to print this value";
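// Illustrative outputs of the DOUBLE branch above (a sketch): 3.0 -> "3.0"
// (".0" is appended so the value reads back as a real, not an int),
// .52 -> "0.52", -.1 -> "-0.1", while NaN and the infinities become the JSON
// strings "NaN", "Infinity" and "-Infinity".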
void TraceEvent::AppendAsJSON(std::string* out) const {
  int64 time_int64 = timestamp_.ToInternalValue();
  int process_id = TraceLog::GetInstance()->process_id();
  // Category group checked at category creation time.
  DCHECK(!strchr(name_, '"'));
      "{\"cat\":\"%s\",\"pid\":%i,\"tid\":%i,\"ts\":%" PRId64 ","
      "\"ph\":\"%c\",\"name\":\"%s\",\"args\":{",
      TraceLog::GetCategoryGroupName(category_group_enabled_),

  // Output argument names and values, stopping at the first NULL argument
  // name.
  for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
    *out += arg_names_[i];

    if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
      convertable_values_[i]->AppendAsTraceFormat(out);
      AppendValueAsJSON(arg_types_[i], arg_values_[i], out);

  if (phase_ == TRACE_EVENT_PHASE_COMPLETE) {
    int64 duration = duration_.ToInternalValue();
      StringAppendF(out, ",\"dur\":%" PRId64, duration);
    if (!thread_timestamp_.is_null()) {
      int64 thread_duration = thread_duration_.ToInternalValue();
      if (thread_duration != -1)
        StringAppendF(out, ",\"tdur\":%" PRId64, thread_duration);

  // Output tts if thread_timestamp is valid.
  if (!thread_timestamp_.is_null()) {
    int64 thread_time_int64 = thread_timestamp_.ToInternalValue();
    StringAppendF(out, ",\"tts\":%" PRId64, thread_time_int64);

  // If id_ is set, print it out as a hex string so we don't lose any
  // bits (it might be a 64-bit pointer).
  if (flags_ & TRACE_EVENT_FLAG_HAS_ID)
    StringAppendF(out, ",\"id\":\"0x%" PRIx64 "\"", static_cast<uint64>(id_));

  // Instant events also output their scope.
  if (phase_ == TRACE_EVENT_PHASE_INSTANT) {
    switch (flags_ & TRACE_EVENT_FLAG_SCOPE_MASK) {
      case TRACE_EVENT_SCOPE_GLOBAL:
        scope = TRACE_EVENT_SCOPE_NAME_GLOBAL;
      case TRACE_EVENT_SCOPE_PROCESS:
        scope = TRACE_EVENT_SCOPE_NAME_PROCESS;
      case TRACE_EVENT_SCOPE_THREAD:
        scope = TRACE_EVENT_SCOPE_NAME_THREAD;
    StringAppendF(out, ",\"s\":\"%c\"", scope);
void TraceEvent::AppendPrettyPrinted(std::ostringstream* out) const {
  *out << name_ << "[";
  *out << TraceLog::GetCategoryGroupName(category_group_enabled_);
  for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
    *out << arg_names_[i] << ":";
    std::string value_as_text;

    if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
      convertable_values_[i]->AppendAsTraceFormat(&value_as_text);
      AppendValueAsJSON(arg_types_[i], arg_values_[i], &value_as_text);

    *out << value_as_text;

////////////////////////////////////////////////////////////////////////////////
//
// TraceResultBuffer
//
////////////////////////////////////////////////////////////////////////////////
TraceResultBuffer::OutputCallback
    TraceResultBuffer::SimpleOutput::GetCallback() {
  return Bind(&SimpleOutput::Append, Unretained(this));

void TraceResultBuffer::SimpleOutput::Append(
    const std::string& json_trace_output) {
  json_output += json_trace_output;

TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {

TraceResultBuffer::~TraceResultBuffer() {

void TraceResultBuffer::SetOutputCallback(
    const OutputCallback& json_chunk_callback) {
  output_callback_ = json_chunk_callback;

void TraceResultBuffer::Start() {
  append_comma_ = false;
  output_callback_.Run("[");

void TraceResultBuffer::AddFragment(const std::string& trace_fragment) {
    output_callback_.Run(",");
  append_comma_ = true;
  output_callback_.Run(trace_fragment);

void TraceResultBuffer::Finish() {
  output_callback_.Run("]");

////////////////////////////////////////////////////////////////////////////////
//
// TraceSamplingThread
//
////////////////////////////////////////////////////////////////////////////////
class TraceBucketData;
typedef base::Callback<void(TraceBucketData*)> TraceSampleCallback;

class TraceBucketData {
  TraceBucketData(base::subtle::AtomicWord* bucket,
                  TraceSampleCallback callback);

  TRACE_EVENT_API_ATOMIC_WORD* bucket;
  const char* bucket_name;
  TraceSampleCallback callback;

// This object must be created on the IO thread.
class TraceSamplingThread : public PlatformThread::Delegate {
  TraceSamplingThread();
  virtual ~TraceSamplingThread();

  // Implementation of PlatformThread::Delegate:
  virtual void ThreadMain() OVERRIDE;

  static void DefaultSamplingCallback(TraceBucketData* bucket_data);

  void WaitSamplingEventForTesting();

  friend class TraceLog;

  // Not thread-safe. Once the ThreadMain has been called, this can no longer
  // be called.
  void RegisterSampleBucket(TRACE_EVENT_API_ATOMIC_WORD* bucket,
                            const char* const name,
                            TraceSampleCallback callback);
  // Splits a combined "category\0name" into the two component parts.
  static void ExtractCategoryAndName(const char* combined,
                                     const char** category,
  std::vector<TraceBucketData> sample_buckets_;
  bool thread_running_;
  CancellationFlag cancellation_flag_;
  WaitableEvent waitable_event_for_testing_;

TraceSamplingThread::TraceSamplingThread()
    : thread_running_(false),
      waitable_event_for_testing_(false, false) {

TraceSamplingThread::~TraceSamplingThread() {

void TraceSamplingThread::ThreadMain() {
  PlatformThread::SetName("Sampling Thread");
  thread_running_ = true;
  const int kSamplingFrequencyMicroseconds = 1000;
  while (!cancellation_flag_.IsSet()) {
    PlatformThread::Sleep(
        TimeDelta::FromMicroseconds(kSamplingFrequencyMicroseconds));
    waitable_event_for_testing_.Signal();

void TraceSamplingThread::DefaultSamplingCallback(
    TraceBucketData* bucket_data) {
  TRACE_EVENT_API_ATOMIC_WORD category_and_name =
      TRACE_EVENT_API_ATOMIC_LOAD(*bucket_data->bucket);
  if (!category_and_name)
  const char* const combined =
      reinterpret_cast<const char* const>(category_and_name);
  const char* category_group;
  ExtractCategoryAndName(combined, &category_group, &name);
  TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_SAMPLE,
      TraceLog::GetCategoryGroupEnabled(category_group),
      name, 0, 0, NULL, NULL, NULL, NULL, 0);

void TraceSamplingThread::GetSamples() {
  for (size_t i = 0; i < sample_buckets_.size(); ++i) {
    TraceBucketData* bucket_data = &sample_buckets_[i];
    bucket_data->callback.Run(bucket_data);

void TraceSamplingThread::RegisterSampleBucket(
    TRACE_EVENT_API_ATOMIC_WORD* bucket,
    const char* const name,
    TraceSampleCallback callback) {
  // Access to sample_buckets_ doesn't race with the sampling thread that
  // reads sample_buckets_, because RegisterSampleBucket is guaranteed to be
  // called before the sampling thread is created.
  DCHECK(!thread_running_);
  sample_buckets_.push_back(TraceBucketData(bucket, name, callback));

void TraceSamplingThread::ExtractCategoryAndName(const char* combined,
                                                 const char** category,
  *category = combined;
  *name = &combined[strlen(combined) + 1];
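// Example (illustrative): for combined == "cc\0DrawFrame", *category points
// at "cc" and *name points just past the embedded NUL, i.e. at "DrawFrame".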
void TraceSamplingThread::Stop() {
  cancellation_flag_.Set();

void TraceSamplingThread::WaitSamplingEventForTesting() {
  waitable_event_for_testing_.Wait();

TraceBucketData::TraceBucketData(base::subtle::AtomicWord* bucket,
                                 TraceSampleCallback callback)

TraceBucketData::~TraceBucketData() {

////////////////////////////////////////////////////////////////////////////////
//
// TraceLog
//
////////////////////////////////////////////////////////////////////////////////
class TraceLog::ThreadLocalEventBuffer
    : public MessageLoop::DestructionObserver {
  ThreadLocalEventBuffer(TraceLog* trace_log);
  virtual ~ThreadLocalEventBuffer();

  TraceEvent* AddTraceEvent(TraceEventHandle* handle);

  void ReportOverhead(const TimeTicks& event_timestamp,
                      const TimeTicks& event_thread_timestamp);

  TraceEvent* GetEventByHandle(TraceEventHandle handle) {
    if (!chunk_ || handle.chunk_seq != chunk_->seq() ||
        handle.chunk_index != chunk_index_)
    return chunk_->GetEventAt(handle.event_index);

  int generation() const { return generation_; }

  // MessageLoop::DestructionObserver
  virtual void WillDestroyCurrentMessageLoop() OVERRIDE;

  void FlushWhileLocked();

  void CheckThisIsCurrentBuffer() const {
    DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);

  // Since TraceLog is a leaky singleton, trace_log_ will always be valid
  // as long as the thread exists.
  TraceLog* trace_log_;
  scoped_ptr<TraceBufferChunk> chunk_;
  size_t chunk_index_;
  TimeDelta overhead_;

  DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer);

TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
    : trace_log_(trace_log),
      generation_(trace_log->generation()) {
  // ThreadLocalEventBuffer is created only if the thread has a message loop,
  // so the following message_loop won't be NULL.
  MessageLoop* message_loop = MessageLoop::current();
  message_loop->AddDestructionObserver(this);

  AutoLock lock(trace_log->lock_);
  trace_log->thread_message_loops_.insert(message_loop);

TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
  CheckThisIsCurrentBuffer();
  MessageLoop::current()->RemoveDestructionObserver(this);

  // Zero event_count_ happens in any of the following cases:
  // - no event generated for the thread;
  // - the thread has no message loop;
  // - trace_event_overhead is disabled.
    InitializeMetadataEvent(AddTraceEvent(NULL),
                            static_cast<int>(base::PlatformThread::CurrentId()),
                            "overhead", "average_overhead",
                            overhead_.InMillisecondsF() / event_count_);

  AutoLock lock(trace_log_->lock_);
  trace_log_->thread_message_loops_.erase(MessageLoop::current());
  trace_log_->thread_local_event_buffer_.Set(NULL);
TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
    TraceEventHandle* handle) {
  CheckThisIsCurrentBuffer();

  if (chunk_ && chunk_->IsFull()) {
    AutoLock lock(trace_log_->lock_);

    AutoLock lock(trace_log_->lock_);
    chunk_ = trace_log_->logged_events_->GetChunk(&chunk_index_);
    trace_log_->CheckIfBufferIsFullWhileLocked();

  TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index);
  if (trace_event && handle)
    MakeHandle(chunk_->seq(), chunk_index_, event_index, handle);

void TraceLog::ThreadLocalEventBuffer::ReportOverhead(
    const TimeTicks& event_timestamp,
    const TimeTicks& event_thread_timestamp) {
  if (!g_category_group_enabled[g_category_trace_event_overhead])

  CheckThisIsCurrentBuffer();

  TimeTicks thread_now = ThreadNow();
  TimeTicks now = trace_log_->OffsetNow();
  TimeDelta overhead = now - event_timestamp;
  if (overhead.InMicroseconds() >= kOverheadReportThresholdInMicroseconds) {
    TraceEvent* trace_event = AddTraceEvent(NULL);
      trace_event->Initialize(
          static_cast<int>(PlatformThread::CurrentId()),
          event_timestamp, event_thread_timestamp,
          TRACE_EVENT_PHASE_COMPLETE,
          &g_category_group_enabled[g_category_trace_event_overhead],
          "overhead", 0, 0, NULL, NULL, NULL, NULL, 0);
      trace_event->UpdateDuration(now, thread_now);

  overhead_ += overhead;

void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {

void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() {
  trace_log_->lock_.AssertAcquired();
  if (trace_log_->CheckGeneration(generation_)) {
    // Return the chunk to the buffer only if the generation matches.
    trace_log_->logged_events_->ReturnChunk(chunk_index_, chunk_.Pass());
  }
  // Otherwise this method may be called from the destructor, or TraceLog will
  // find the generation mismatch and delete this buffer soon.
TraceLog* TraceLog::GetInstance() {
  return Singleton<TraceLog, LeakySingletonTraits<TraceLog> >::get();

TraceLog::TraceLog()
    : num_traces_recorded_(0),
      dispatching_to_observer_list_(false),
      process_sort_index_(0),
      process_id_hash_(0),
      trace_options_(RECORD_UNTIL_FULL),
      sampling_thread_handle_(0),
      category_filter_(CategoryFilter::kDefaultCategoryFilterString),
      event_callback_category_filter_(
          CategoryFilter::kDefaultCategoryFilterString),
      thread_shared_chunk_index_(0),
  // Trace is enabled or disabled on one thread while other threads are
  // accessing the enabled flag. We don't care whether edge-case events are
  // traced or not, so we allow races on the enabled flag to keep the trace
  // macros fast.
  // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
  // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
  //                            sizeof(g_category_group_enabled),
  //                            "trace_event category enabled");
  for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) {
    ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i],
                         "trace_event category enabled");

#if defined(OS_NACL)  // NaCl shouldn't expose the process id.
  SetProcessID(0);
#else
  SetProcessID(static_cast<int>(GetCurrentProcId()));

  // NaCl also shouldn't access the command line.
  if (CommandLine::InitializedForCurrentProcess() &&
      CommandLine::ForCurrentProcess()->HasSwitch(switches::kTraceToConsole)) {
    std::string filter = CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
        switches::kTraceToConsole);
    if (filter.empty()) {
      filter = kEchoToConsoleCategoryFilter;
      filter.append(kEchoToConsoleCategoryFilter);

    LOG(ERROR) << "Start " << switches::kTraceToConsole
               << " with CategoryFilter '" << filter << "'.";
    SetEnabled(CategoryFilter(filter), RECORDING_MODE, ECHO_TO_CONSOLE);

  logged_events_.reset(CreateTraceBuffer());
TraceLog::~TraceLog() {

const unsigned char* TraceLog::GetCategoryGroupEnabled(
    const char* category_group) {
  TraceLog* tracelog = GetInstance();
    DCHECK(!g_category_group_enabled[g_category_already_shutdown]);
    return &g_category_group_enabled[g_category_already_shutdown];
  return tracelog->GetCategoryGroupEnabledInternal(category_group);

const char* TraceLog::GetCategoryGroupName(
    const unsigned char* category_group_enabled) {
  // Calculate the index of the category group by finding
  // category_group_enabled in g_category_group_enabled array.
  uintptr_t category_begin =
      reinterpret_cast<uintptr_t>(g_category_group_enabled);
  uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled);
  DCHECK(category_ptr >= category_begin &&
         category_ptr < reinterpret_cast<uintptr_t>(
             g_category_group_enabled + MAX_CATEGORY_GROUPS)) <<
      "out of bounds category pointer";
  uintptr_t category_index =
      (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
  return g_category_groups[category_index];

void TraceLog::UpdateCategoryGroupEnabledFlag(int category_index) {
  unsigned char enabled_flag = 0;
  const char* category_group = g_category_groups[category_index];
  if (mode_ == RECORDING_MODE &&
      category_filter_.IsCategoryGroupEnabled(category_group))
    enabled_flag |= ENABLED_FOR_RECORDING;
  else if (mode_ == MONITORING_MODE &&
      category_filter_.IsCategoryGroupEnabled(category_group))
    enabled_flag |= ENABLED_FOR_MONITORING;
  if (event_callback_ &&
      event_callback_category_filter_.IsCategoryGroupEnabled(category_group))
    enabled_flag |= ENABLED_FOR_EVENT_CALLBACK;
  g_category_group_enabled[category_index] = enabled_flag;

void TraceLog::UpdateCategoryGroupEnabledFlags() {
  for (int i = 0; i < g_category_index; i++)
    UpdateCategoryGroupEnabledFlag(i);
void TraceLog::UpdateSyntheticDelaysFromCategoryFilter() {
  ResetTraceEventSyntheticDelays();
  const CategoryFilter::StringList& delays =
      category_filter_.GetSyntheticDelayValues();
  CategoryFilter::StringList::const_iterator ci;
  for (ci = delays.begin(); ci != delays.end(); ++ci) {
    StringTokenizer tokens(*ci, ";");
    if (!tokens.GetNext())
    TraceEventSyntheticDelay* delay =
        TraceEventSyntheticDelay::Lookup(tokens.token());
    while (tokens.GetNext()) {
      std::string token = tokens.token();
      double target_duration = strtod(token.c_str(), &duration_end);
      if (duration_end != token.c_str()) {
        delay->SetTargetDuration(
            TimeDelta::FromMicroseconds(target_duration * 1e6));
      } else if (token == "static") {
        delay->SetMode(TraceEventSyntheticDelay::STATIC);
      } else if (token == "oneshot") {
        delay->SetMode(TraceEventSyntheticDelay::ONE_SHOT);
      } else if (token == "alternating") {
        delay->SetMode(TraceEventSyntheticDelay::ALTERNATING);
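// Example of the filter syntax parsed above (illustrative; the delay names
// are hypothetical): "DELAY(gpu.SwapBuffers;0.016)" sets a 16 ms target on
// the "gpu.SwapBuffers" delay (the numeric token is in seconds, converted to
// microseconds above), and "DELAY(cc.Scheduler;0.005;oneshot)" additionally
// selects the ONE_SHOT mode.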
const unsigned char* TraceLog::GetCategoryGroupEnabledInternal(
    const char* category_group) {
  DCHECK(!strchr(category_group, '"')) <<
      "Category groups may not contain double quote";
  AutoLock lock(lock_);

  unsigned char* category_group_enabled = NULL;
  // Search for pre-existing category group.
  for (int i = 0; i < g_category_index; i++) {
    if (strcmp(g_category_groups[i], category_group) == 0) {
      category_group_enabled = &g_category_group_enabled[i];

  if (!category_group_enabled) {
    // Create a new category group.
    DCHECK(g_category_index < MAX_CATEGORY_GROUPS) <<
        "must increase MAX_CATEGORY_GROUPS";
    if (g_category_index < MAX_CATEGORY_GROUPS) {
      int new_index = g_category_index++;
      // Don't hold on to the category_group pointer, so that we can create
      // category groups with strings not known at compile time (this is
      // required by SetWatchEvent).
      const char* new_group = strdup(category_group);
      ANNOTATE_LEAKING_OBJECT_PTR(new_group);
      g_category_groups[new_index] = new_group;
      DCHECK(!g_category_group_enabled[new_index]);
      // Note that if both included and excluded patterns in the
      // CategoryFilter are empty, we exclude nothing,
      // thereby enabling this category group.
      UpdateCategoryGroupEnabledFlag(new_index);
      category_group_enabled = &g_category_group_enabled[new_index];
      category_group_enabled =
          &g_category_group_enabled[g_category_categories_exhausted];
  return category_group_enabled;

void TraceLog::GetKnownCategoryGroups(
    std::vector<std::string>* category_groups) {
  AutoLock lock(lock_);
  category_groups->push_back(
      g_category_groups[g_category_trace_event_overhead]);
  for (int i = g_num_builtin_categories; i < g_category_index; i++)
    category_groups->push_back(g_category_groups[i]);
void TraceLog::SetEnabled(const CategoryFilter& category_filter,
  std::vector<EnabledStateObserver*> observer_list;
    AutoLock lock(lock_);

    // Can't enable tracing when Flush() is in progress.
    DCHECK(!flush_message_loop_proxy_.get());

    Options old_options = trace_options();

      if (options != old_options) {
        DLOG(ERROR) << "Attempting to re-enable tracing with a different "
                    << "set of options.";

      if (mode != mode_) {
        DLOG(ERROR) << "Attempting to re-enable tracing with a different mode.";

      category_filter_.Merge(category_filter);
      UpdateCategoryGroupEnabledFlags();

    if (dispatching_to_observer_list_) {
          "Cannot manipulate TraceLog::Enabled state from an observer.";

    if (options != old_options) {
      subtle::NoBarrier_Store(&trace_options_, options);
      UseNextTraceBuffer();

    num_traces_recorded_++;

    category_filter_ = CategoryFilter(category_filter);
    UpdateCategoryGroupEnabledFlags();
    UpdateSyntheticDelaysFromCategoryFilter();

    if (options & ENABLE_SAMPLING) {
      sampling_thread_.reset(new TraceSamplingThread);
      sampling_thread_->RegisterSampleBucket(
          Bind(&TraceSamplingThread::DefaultSamplingCallback));
      sampling_thread_->RegisterSampleBucket(
          Bind(&TraceSamplingThread::DefaultSamplingCallback));
      sampling_thread_->RegisterSampleBucket(
          Bind(&TraceSamplingThread::DefaultSamplingCallback));
      if (!PlatformThread::Create(
              0, sampling_thread_.get(), &sampling_thread_handle_)) {
        DCHECK(false) << "failed to create thread";

    dispatching_to_observer_list_ = true;
    observer_list = enabled_state_observer_list_;

  // Notify observers outside the lock in case they trigger trace events.
  for (size_t i = 0; i < observer_list.size(); ++i)
    observer_list[i]->OnTraceLogEnabled();

  AutoLock lock(lock_);
  dispatching_to_observer_list_ = false;
CategoryFilter TraceLog::GetCurrentCategoryFilter() {
  AutoLock lock(lock_);
  return category_filter_;

void TraceLog::SetDisabled() {
  AutoLock lock(lock_);
  SetDisabledWhileLocked();

void TraceLog::SetDisabledWhileLocked() {
  lock_.AssertAcquired();

  if (dispatching_to_observer_list_) {
        << "Cannot manipulate TraceLog::Enabled state from an observer.";

  if (sampling_thread_.get()) {
    // Stop the sampling thread.
    sampling_thread_->Stop();
    PlatformThread::Join(sampling_thread_handle_);
    sampling_thread_handle_ = PlatformThreadHandle();
    sampling_thread_.reset();

  category_filter_.Clear();
  subtle::NoBarrier_Store(&watch_category_, 0);
  watch_event_name_ = "";
  UpdateCategoryGroupEnabledFlags();
  AddMetadataEventsWhileLocked();

  dispatching_to_observer_list_ = true;
  std::vector<EnabledStateObserver*> observer_list =
      enabled_state_observer_list_;

  // Dispatch to observers outside the lock in case the observer triggers a
  // trace event.
  AutoUnlock unlock(lock_);
  for (size_t i = 0; i < observer_list.size(); ++i)
    observer_list[i]->OnTraceLogDisabled();
  dispatching_to_observer_list_ = false;

int TraceLog::GetNumTracesRecorded() {
  AutoLock lock(lock_);
  return num_traces_recorded_;
void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) {
  enabled_state_observer_list_.push_back(listener);

void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver* listener) {
  std::vector<EnabledStateObserver*>::iterator it =
      std::find(enabled_state_observer_list_.begin(),
                enabled_state_observer_list_.end(),
  if (it != enabled_state_observer_list_.end())
    enabled_state_observer_list_.erase(it);

bool TraceLog::HasEnabledStateObserver(EnabledStateObserver* listener) const {
  std::vector<EnabledStateObserver*>::const_iterator it =
      std::find(enabled_state_observer_list_.begin(),
                enabled_state_observer_list_.end(),
  return it != enabled_state_observer_list_.end();

float TraceLog::GetBufferPercentFull() const {
  AutoLock lock(lock_);
  return static_cast<float>(static_cast<double>(logged_events_->Size()) /
                            logged_events_->Capacity());

bool TraceLog::BufferIsFull() const {
  AutoLock lock(lock_);
  return logged_events_->IsFull();

TraceBuffer* TraceLog::CreateTraceBuffer() {
  Options options = trace_options();
  if (options & RECORD_CONTINUOUSLY)
    return new TraceBufferRingBuffer(kTraceEventRingBufferChunks);
  else if ((options & ENABLE_SAMPLING) && mode_ == MONITORING_MODE)
    return new TraceBufferRingBuffer(kMonitorTraceEventBufferChunks);
  else if (options & ECHO_TO_CONSOLE)
    return new TraceBufferRingBuffer(kEchoToConsoleTraceEventBufferChunks);
  return new TraceBufferVector();

TraceEvent* TraceLog::AddEventToThreadSharedChunkWhileLocked(
    TraceEventHandle* handle, bool check_buffer_is_full) {
  lock_.AssertAcquired();

  if (thread_shared_chunk_ && thread_shared_chunk_->IsFull()) {
    logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                thread_shared_chunk_.Pass());

  if (!thread_shared_chunk_) {
    thread_shared_chunk_ = logged_events_->GetChunk(
        &thread_shared_chunk_index_);
    if (check_buffer_is_full)
      CheckIfBufferIsFullWhileLocked();
  if (!thread_shared_chunk_)

  TraceEvent* trace_event = thread_shared_chunk_->AddTraceEvent(&event_index);
  if (trace_event && handle) {
    MakeHandle(thread_shared_chunk_->seq(), thread_shared_chunk_index_,
               event_index, handle);

void TraceLog::CheckIfBufferIsFullWhileLocked() {
  lock_.AssertAcquired();
  if (logged_events_->IsFull())
    SetDisabledWhileLocked();

void TraceLog::SetEventCallbackEnabled(const CategoryFilter& category_filter,
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&event_callback_,
                          reinterpret_cast<subtle::AtomicWord>(cb));
  event_callback_category_filter_ = category_filter;
  UpdateCategoryGroupEnabledFlags();

void TraceLog::SetEventCallbackDisabled() {
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&event_callback_, 0);
  UpdateCategoryGroupEnabledFlags();
// Flush() works as follows:
// 1. Flush() is called in threadA whose message loop is saved in
//    flush_message_loop_proxy_;
// 2. If thread_message_loops_ is not empty, threadA posts a task to each
//    message loop to flush the thread-local buffers; otherwise it finishes
//    the flush;
// 3. FlushCurrentThread() deletes the thread-local event buffer:
//    - The last batch of events of the thread are flushed into the main
//      buffer;
//    - The message loop will be removed from thread_message_loops_;
//    If this is the last message loop, finish the flush;
// 4. If any thread hasn't finished its flush in time, finish the flush.
void TraceLog::Flush(const TraceLog::OutputCallback& cb) {
    // Can't flush when tracing is enabled because otherwise PostTask would
    // - generate more trace events;
    // - deschedule the calling thread on some platforms causing inaccurate
    //   timing of the trace events.
    scoped_refptr<RefCountedString> empty_result = new RefCountedString;
      cb.Run(empty_result, false);
    LOG(WARNING) << "Ignored TraceLog::Flush called when tracing is enabled";

  int generation = this->generation();
    AutoLock lock(lock_);
    DCHECK(!flush_message_loop_proxy_.get());
    flush_message_loop_proxy_ = MessageLoopProxy::current();
    DCHECK(!thread_message_loops_.size() || flush_message_loop_proxy_.get());
    flush_output_callback_ = cb;

    if (thread_shared_chunk_) {
      logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                  thread_shared_chunk_.Pass());

    if (thread_message_loops_.size()) {
      for (hash_set<MessageLoop*>::const_iterator it =
               thread_message_loops_.begin();
           it != thread_message_loops_.end(); ++it) {
            Bind(&TraceLog::FlushCurrentThread, Unretained(this), generation));
      flush_message_loop_proxy_->PostDelayedTask(
          Bind(&TraceLog::OnFlushTimeout, Unretained(this), generation),
          TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs));

  FinishFlush(generation);
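// Usage sketch (illustrative): tracing must be disabled before flushing, and
// the callback may run several times:
//   TraceLog::GetInstance()->SetDisabled();
//   TraceLog::GetInstance()->Flush(
//       Bind(&OnTraceDataCollected));  // OnTraceDataCollected is hypothetical
// Each invocation receives a JSON fragment plus a has_more_events flag; the
// flush is complete once has_more_events is false.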
void TraceLog::ConvertTraceEventsToTraceFormat(
    scoped_ptr<TraceBuffer> logged_events,
    const TraceLog::OutputCallback& flush_output_callback) {
  if (flush_output_callback.is_null())

  // The callback needs to be called at least once even if there are no events
  // to let the caller know the flush has completed.
  bool has_more_events = true;
    scoped_refptr<RefCountedString> json_events_str_ptr =
        new RefCountedString();

    for (size_t i = 0; i < kTraceEventBatchChunks; ++i) {
      const TraceBufferChunk* chunk = logged_events->NextChunk();
        has_more_events = false;
      for (size_t j = 0; j < chunk->size(); ++j) {
          json_events_str_ptr->data().append(",");
        chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data()));

    flush_output_callback.Run(json_events_str_ptr, has_more_events);
  } while (has_more_events);
void TraceLog::FinishFlush(int generation) {
  scoped_ptr<TraceBuffer> previous_logged_events;
  OutputCallback flush_output_callback;

  if (!CheckGeneration(generation))

    AutoLock lock(lock_);

    previous_logged_events.swap(logged_events_);
    UseNextTraceBuffer();
    thread_message_loops_.clear();

    flush_message_loop_proxy_ = NULL;
    flush_output_callback = flush_output_callback_;
    flush_output_callback_.Reset();

  ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
                                  flush_output_callback);

// Run in each thread holding a local event buffer.
void TraceLog::FlushCurrentThread(int generation) {
    AutoLock lock(lock_);
    if (!CheckGeneration(generation) || !flush_message_loop_proxy_) {
      // This is late. The corresponding flush has finished.

  // This will flush the thread local buffer.
  delete thread_local_event_buffer_.Get();

  AutoLock lock(lock_);
  if (!CheckGeneration(generation) || !flush_message_loop_proxy_ ||
      thread_message_loops_.size())

  flush_message_loop_proxy_->PostTask(
      Bind(&TraceLog::FinishFlush, Unretained(this), generation));

void TraceLog::OnFlushTimeout(int generation) {
    AutoLock lock(lock_);
    if (!CheckGeneration(generation) || !flush_message_loop_proxy_) {
      // Flush has finished before the timeout.

      "The following threads haven't finished flushing in time. "
      "If this happens repeatedly for a thread, please call "
      "TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop() from "
      "that thread to prevent its trace events from being lost.";
  for (hash_set<MessageLoop*>::const_iterator it =
           thread_message_loops_.begin();
       it != thread_message_loops_.end(); ++it) {
    LOG(WARNING) << "Thread: " << (*it)->thread_name();

  FinishFlush(generation);
void TraceLog::FlushButLeaveBufferIntact(
    const TraceLog::OutputCallback& flush_output_callback) {
  scoped_ptr<TraceBuffer> previous_logged_events;
    AutoLock lock(lock_);
    AddMetadataEventsWhileLocked();
    if (thread_shared_chunk_) {
      // Return the chunk to the main buffer to flush the sampling data.
      logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                  thread_shared_chunk_.Pass());
    previous_logged_events = logged_events_->CloneForIteration().Pass();

  ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
                                  flush_output_callback);

void TraceLog::UseNextTraceBuffer() {
  logged_events_.reset(CreateTraceBuffer());
  subtle::NoBarrier_AtomicIncrement(&generation_, 1);
  thread_shared_chunk_.reset();
  thread_shared_chunk_index_ = 0;
TraceEventHandle TraceLog::AddTraceEvent(
    const unsigned char* category_group_enabled,
    unsigned long long id,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned char flags) {
  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
  base::TimeTicks now = base::TimeTicks::NowFromSystemTraceTime();
  return AddTraceEventWithThreadIdAndTimestamp(phase, category_group_enabled,
                                               name, id, thread_id, now,
                                               num_args, arg_names,
                                               arg_types, arg_values,
                                               convertable_values, flags);

TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
    const unsigned char* category_group_enabled,
    unsigned long long id,
    const TimeTicks& timestamp,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned char flags) {
  TraceEventHandle handle = { 0, 0, 0 };
  if (!*category_group_enabled)

  // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
  // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
  if (thread_is_in_trace_event_.Get())

  AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);

  if (flags & TRACE_EVENT_FLAG_MANGLE_ID)
    id ^= process_id_hash_;

  TimeTicks now = OffsetTimestamp(timestamp);
  TimeTicks thread_now = ThreadNow();

  ThreadLocalEventBuffer* thread_local_event_buffer = NULL;
  // A ThreadLocalEventBuffer needs the message loop
  // - to know when the thread exits;
  // - to handle the final flush.
  // For a thread without a message loop, or when its message loop may be
  // blocked, trace events are added to the main buffer directly.
  if (!thread_blocks_message_loop_.Get() && MessageLoop::current()) {
    thread_local_event_buffer = thread_local_event_buffer_.Get();
    if (thread_local_event_buffer &&
        !CheckGeneration(thread_local_event_buffer->generation())) {
      delete thread_local_event_buffer;
      thread_local_event_buffer = NULL;
    if (!thread_local_event_buffer) {
      thread_local_event_buffer = new ThreadLocalEventBuffer(this);
      thread_local_event_buffer_.Set(thread_local_event_buffer);

  // Check and update the current thread name only if the event is for the
  // current thread to avoid locks in most cases.
  if (thread_id == static_cast<int>(PlatformThread::CurrentId())) {
    const char* new_name = ThreadIdNameManager::GetInstance()->
    // Check if the thread name has been set or changed since the previous
    // call (if any), but don't bother if the new name is empty. Note this will
    // not detect a thread name change within the same char* buffer address: we
    // favor common case performance over corner case correctness.
    if (new_name != g_current_thread_name.Get().Get() &&
        new_name && *new_name) {
      g_current_thread_name.Get().Set(new_name);

      AutoLock thread_info_lock(thread_info_lock_);

      hash_map<int, std::string>::iterator existing_name =
          thread_names_.find(thread_id);
      if (existing_name == thread_names_.end()) {
        // This is a new thread id, and a new name.
        thread_names_[thread_id] = new_name;
        // This is a thread id that we've seen before, but potentially with a
        // different name.
        std::vector<StringPiece> existing_names;
        Tokenize(existing_name->second, ",", &existing_names);
        bool found = std::find(existing_names.begin(),
                               existing_names.end(),
                               new_name) != existing_names.end();
          if (existing_names.size())
            existing_name->second.push_back(',');
          existing_name->second.append(new_name);
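// Example of the name merging above (illustrative): if thread 42 is first
// named "Compositor" and later renamed "CrRendererMain", thread_names_[42]
// becomes "Compositor,CrRendererMain"; a name already present in the
// comma-separated list is not appended again.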
  std::string console_message;
  if (*category_group_enabled &
      (ENABLED_FOR_RECORDING | ENABLED_FOR_MONITORING)) {
    OptionalAutoLock lock(lock_);

    TraceEvent* trace_event = NULL;
    if (thread_local_event_buffer) {
      trace_event = thread_local_event_buffer->AddTraceEvent(&handle);
      lock.EnsureAcquired();
      trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true);

      trace_event->Initialize(thread_id, now, thread_now, phase,
                              category_group_enabled, name, id,
                              num_args, arg_names, arg_types, arg_values,
                              convertable_values, flags);

#if defined(OS_ANDROID)
      trace_event->SendToATrace();

    if (trace_options() & ECHO_TO_CONSOLE) {
      console_message = EventToConsoleMessage(
          phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
          timestamp, trace_event);

  if (console_message.size())
    LOG(ERROR) << console_message;

  if (reinterpret_cast<const unsigned char*>(subtle::NoBarrier_Load(
          &watch_category_)) == category_group_enabled) {
    bool event_name_matches;
    WatchEventCallback watch_event_callback_copy;
      AutoLock lock(lock_);
      event_name_matches = watch_event_name_ == name;
      watch_event_callback_copy = watch_event_callback_;
    if (event_name_matches) {
      if (!watch_event_callback_copy.is_null())
        watch_event_callback_copy.Run();

  if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
    EventCallback event_callback = reinterpret_cast<EventCallback>(
        subtle::NoBarrier_Load(&event_callback_));
    if (event_callback) {
          phase == TRACE_EVENT_PHASE_COMPLETE ?
              TRACE_EVENT_PHASE_BEGIN : phase,
          category_group_enabled, name, id,
          num_args, arg_names, arg_types, arg_values,

  if (thread_local_event_buffer)
    thread_local_event_buffer->ReportOverhead(now, thread_now);
// May be called when a COMPLETE event ends and the unfinished event has been
// recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL).
std::string TraceLog::EventToConsoleMessage(unsigned char phase,
                                            const TimeTicks& timestamp,
                                            TraceEvent* trace_event) {
  AutoLock thread_info_lock(thread_info_lock_);

  // The caller should translate TRACE_EVENT_PHASE_COMPLETE to
  // TRACE_EVENT_PHASE_BEGIN or TRACE_EVENT_PHASE_END.
  DCHECK(phase != TRACE_EVENT_PHASE_COMPLETE);

  TimeDelta duration;
  int thread_id = trace_event ?
      trace_event->thread_id() : PlatformThread::CurrentId();
  if (phase == TRACE_EVENT_PHASE_END) {
    duration = timestamp - thread_event_start_times_[thread_id].top();
    thread_event_start_times_[thread_id].pop();
  }

  std::string thread_name = thread_names_[thread_id];
  if (thread_colors_.find(thread_name) == thread_colors_.end())
    thread_colors_[thread_name] = (thread_colors_.size() % 6) + 1;

  std::ostringstream log;
  log << base::StringPrintf("%s: \x1b[0;3%dm",
                            thread_name.c_str(),
                            thread_colors_[thread_name]);

  size_t depth = 0;
  if (thread_event_start_times_.find(thread_id) !=
      thread_event_start_times_.end())
    depth = thread_event_start_times_[thread_id].size();

  for (size_t i = 0; i < depth; ++i)
    log << "| ";

  if (trace_event)
    trace_event->AppendPrettyPrinted(&log);
  if (phase == TRACE_EVENT_PHASE_END)
    log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());

  log << "\x1b[0;m";

  if (phase == TRACE_EVENT_PHASE_BEGIN)
    thread_event_start_times_[thread_id].push(timestamp);

  return log.str();
}
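// The ETW variants below forward an event to the Windows ETW provider when
// built for Windows, and also mirror it into the regular trace stream so
// ETW-originated events show up in normal traces.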
void TraceLog::AddTraceEventEtw(char phase,
                                const char* name,
                                const void* id,
                                const char* extra) {
#if defined(OS_WIN)
  TraceEventETWProvider::Trace(name, phase, id, extra);
#endif
  INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
                           TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
}

void TraceLog::AddTraceEventEtw(char phase,
                                const char* name,
                                const void* id,
                                const std::string& extra) {
#if defined(OS_WIN)
  TraceEventETWProvider::Trace(name, phase, id, extra);
#endif
  INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
                           TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
}
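// Fills in the duration of a COMPLETE event once its scope closes. The
// handle returned by AddTraceEvent* identifies the slot to update; if that
// buffer chunk has been recycled in the meantime, the lookup below simply
// yields NULL and only the console/callback paths run.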
void TraceLog::UpdateTraceEventDuration(
    const unsigned char* category_group_enabled,
    const char* name,
    TraceEventHandle handle) {
  // Avoid re-entrance of AddTraceEvent. This may happen in the GPU process
  // when ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
  if (thread_is_in_trace_event_.Get())
    return;

  AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);

  TimeTicks thread_now = ThreadNow();
  TimeTicks now = OffsetNow();

  std::string console_message;
  if (*category_group_enabled & ENABLED_FOR_RECORDING) {
    OptionalAutoLock lock(lock_);

    TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock);
    if (trace_event) {
      DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE);
      trace_event->UpdateDuration(now, thread_now);
#if defined(OS_ANDROID)
      trace_event->SendToATrace();
#endif
    }

    if (trace_options() & ECHO_TO_CONSOLE) {
      console_message = EventToConsoleMessage(TRACE_EVENT_PHASE_END,
                                              now, trace_event);
    }
  }

  if (console_message.size())
    LOG(ERROR) << console_message;

  if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
    EventCallback event_callback = reinterpret_cast<EventCallback>(
        subtle::NoBarrier_Load(&event_callback_));
    if (event_callback) {
      event_callback(now, TRACE_EVENT_PHASE_END, category_group_enabled, name,
                     trace_event_internal::kNoEventId, 0, NULL, NULL, NULL,
                     TRACE_EVENT_FLAG_NONE);
    }
  }
}
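// Example (illustrative, e.g. from a test): get notified the next time a
// given event fires in a given category:
//   TraceLog::GetInstance()->SetWatchEvent(
//       "cc", "ScheduledActionDraw",
//       base::Bind(&OnWatchEventFired));  // OnWatchEventFired is hypothetical.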
void TraceLog::SetWatchEvent(const std::string& category_name,
                             const std::string& event_name,
                             const WatchEventCallback& callback) {
  const unsigned char* category = GetCategoryGroupEnabled(
      category_name.c_str());
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&watch_category_,
                          reinterpret_cast<subtle::AtomicWord>(category));
  watch_event_name_ = event_name;
  watch_event_callback_ = callback;
}

void TraceLog::CancelWatchEvent() {
  AutoLock lock(lock_);
  subtle::NoBarrier_Store(&watch_category_, 0);
  watch_event_name_ = "";
  watch_event_callback_.Reset();
}
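// The metadata events below appear in the serialized trace as phase "M"
// records, roughly (a sketch of the JSON the serializer emits elsewhere):
//   {"ph":"M","name":"process_name","args":{"name":"Browser"},...}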
void TraceLog::AddMetadataEventsWhileLocked() {
  lock_.AssertAcquired();

  int current_thread_id = static_cast<int>(base::PlatformThread::CurrentId());
  if (process_sort_index_ != 0) {
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id,
                            "process_sort_index", "sort_index",
                            process_sort_index_);
  }

  if (process_name_.size()) {
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id,
                            "process_name", "name",
                            process_name_);
  }

  if (process_labels_.size() > 0) {
    std::vector<std::string> labels;
    for (base::hash_map<int, std::string>::iterator it =
             process_labels_.begin();
         it != process_labels_.end(); ++it) {
      labels.push_back(it->second);
    }
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            current_thread_id,
                            "process_labels", "labels",
                            JoinString(labels, ','));
  }

  // Thread sort indices.
  for (hash_map<int, int>::iterator it = thread_sort_indices_.begin();
       it != thread_sort_indices_.end(); ++it) {
    if (it->second == 0)
      continue;
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            it->first,
                            "thread_sort_index", "sort_index",
                            it->second);
  }

  // Thread names.
  AutoLock thread_info_lock(thread_info_lock_);
  for (hash_map<int, std::string>::iterator it = thread_names_.begin();
       it != thread_names_.end(); ++it) {
    if (it->second.empty())
      continue;
    InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
                            it->first,
                            "thread_name", "name",
                            it->second);
  }
}
void TraceLog::WaitSamplingEventForTesting() {
  if (!sampling_thread_)
    return;
  sampling_thread_->WaitSamplingEventForTesting();
}

void TraceLog::DeleteForTesting() {
  DeleteTraceLogForTesting::Delete();
}
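// Handle-based lookup tries the cheapest source first: the calling thread's
// local buffer (no lock), then the thread-shared chunk, then the main
// logged_events_ buffer (both under lock). A stale chunk_seq means the chunk
// was recycled, so the lookup returns NULL rather than a reused slot.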
TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
  return GetEventByHandleInternal(handle, NULL);
}

TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle,
                                               OptionalAutoLock* lock) {
  if (!handle.chunk_seq)
    return NULL;

  if (thread_local_event_buffer_.Get()) {
    TraceEvent* trace_event =
        thread_local_event_buffer_.Get()->GetEventByHandle(handle);
    if (trace_event)
      return trace_event;
  }

  // The event is no longer under the control of the thread local buffer.
  // Try to get the event from the main buffer with a lock.
  if (lock)
    lock->EnsureAcquired();

  if (thread_shared_chunk_ &&
      handle.chunk_index == thread_shared_chunk_index_) {
    return handle.chunk_seq == thread_shared_chunk_->seq() ?
        thread_shared_chunk_->GetEventAt(handle.event_index) : NULL;
  }

  return logged_events_->GetEventByHandle(handle);
}
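// SetProcessID also derives process_id_hash_ with one round of FNV-style
// mixing, (offset_basis ^ pid) * fnv_prime -- a sketch of FNV-1a applied to
// the pid as a single unit rather than byte by byte -- suitable for XORing
// into event ids elsewhere.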
void TraceLog::SetProcessID(int process_id) {
  process_id_ = process_id;
  // Create a FNV hash from the process ID for XORing.
  // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
  unsigned long long offset_basis = 14695981039346656037ull;
  unsigned long long fnv_prime = 1099511628211ull;
  unsigned long long pid = static_cast<unsigned long long>(process_id_);
  process_id_hash_ = (offset_basis ^ pid) * fnv_prime;
}

void TraceLog::SetProcessSortIndex(int sort_index) {
  AutoLock lock(lock_);
  process_sort_index_ = sort_index;
}

void TraceLog::SetProcessName(const std::string& process_name) {
  AutoLock lock(lock_);
  process_name_ = process_name;
}

void TraceLog::UpdateProcessLabel(
    int label_id, const std::string& current_label) {
  if (!current_label.length())
    return RemoveProcessLabel(label_id);

  AutoLock lock(lock_);
  process_labels_[label_id] = current_label;
}

void TraceLog::RemoveProcessLabel(int label_id) {
  AutoLock lock(lock_);
  base::hash_map<int, std::string>::iterator it = process_labels_.find(
      label_id);
  if (it == process_labels_.end())
    return;

  process_labels_.erase(it);
}

void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) {
  AutoLock lock(lock_);
  thread_sort_indices_[static_cast<int>(thread_id)] = sort_index;
}

void TraceLog::SetTimeOffset(TimeDelta offset) {
  time_offset_ = offset;
}

size_t TraceLog::GetObserverCountForTest() const {
  return enabled_state_observer_list_.size();
}

void TraceLog::SetCurrentThreadBlocksMessageLoop() {
  thread_blocks_message_loop_.Set(true);
  if (thread_local_event_buffer_.Get()) {
    // This will flush the thread local buffer.
    delete thread_local_event_buffer_.Get();
  }
}
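// CategoryFilter helpers. A category group is a comma-separated list such as
// "gpu,benchmark"; DoesCategoryGroupContainCategory returns true when any
// token of the group matches the given pattern, so e.g. the group
// "gpu,benchmark" matches both "gpu" and "bench*" (illustrative patterns).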
bool CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
    const std::string& str) {
  return str.empty() ||
         str.at(0) == ' ' ||
         str.at(str.length() - 1) == ' ';
}

bool CategoryFilter::DoesCategoryGroupContainCategory(
    const char* category_group,
    const char* category) const {
  DCHECK(category);
  CStringTokenizer category_group_tokens(category_group,
      category_group + strlen(category_group), ",");
  while (category_group_tokens.GetNext()) {
    std::string category_group_token = category_group_tokens.token();
    // Don't allow empty tokens, nor tokens with leading or trailing space.
    DCHECK(!CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
        category_group_token))
        << "Disallowed category string";
    if (MatchPattern(category_group_token.c_str(), category))
      return true;
  }
  return false;
}
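// A filter string is itself comma-separated; for example (illustrative):
//   "gpu,-ipc,disabled-by-default-memory,DELAY(gpu.SwapBuffers;16)"
// includes "gpu", excludes "ipc", opts into a disabled-by-default category,
// and configures one synthetic delay. Initialize() below parses this form.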
CategoryFilter::CategoryFilter(const std::string& filter_string) {
  if (!filter_string.empty())
    Initialize(filter_string);
  else
    Initialize(CategoryFilter::kDefaultCategoryFilterString);
}

CategoryFilter::CategoryFilter(const CategoryFilter& cf)
    : included_(cf.included_),
      disabled_(cf.disabled_),
      excluded_(cf.excluded_),
      delays_(cf.delays_) {
}

CategoryFilter::~CategoryFilter() {
}

CategoryFilter& CategoryFilter::operator=(const CategoryFilter& rhs) {
  if (this == &rhs)
    return *this;

  included_ = rhs.included_;
  disabled_ = rhs.disabled_;
  excluded_ = rhs.excluded_;
  delays_ = rhs.delays_;
  return *this;
}
void CategoryFilter::Initialize(const std::string& filter_string) {
  // Tokenize list of categories, delimited by ','.
  StringTokenizer tokens(filter_string, ",");
  // Add each token to the appropriate list (included_, excluded_, etc.).
  while (tokens.GetNext()) {
    std::string category = tokens.token();
    // Ignore empty categories.
    if (category.empty())
      continue;
    // Synthetic delays are of the form 'DELAY(delay;option;option;...)'.
    if (category.find(kSyntheticDelayCategoryFilterPrefix) == 0 &&
        category.at(category.size() - 1) == ')') {
      // Strip the 'DELAY(' prefix and the trailing ')', keeping the payload.
      category = category.substr(
          strlen(kSyntheticDelayCategoryFilterPrefix),
          category.size() - strlen(kSyntheticDelayCategoryFilterPrefix) - 1);
      size_t name_length = category.find(';');
      if (name_length != std::string::npos && name_length > 0 &&
          name_length != category.size() - 1) {
        delays_.push_back(category);
      }
    } else if (category.at(0) == '-') {
      // Excluded categories start with '-'.
      // Remove '-' from category string.
      category = category.substr(1);
      excluded_.push_back(category);
    } else if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
                                TRACE_DISABLED_BY_DEFAULT("")) == 0) {
      disabled_.push_back(category);
    } else {
      included_.push_back(category);
    }
  }
}
void CategoryFilter::WriteString(const StringList& values,
                                 std::string* out,
                                 bool included) const {
  bool prepend_comma = !out->empty();
  int token_cnt = 0;
  for (StringList::const_iterator ci = values.begin();
       ci != values.end(); ++ci) {
    if (token_cnt > 0 || prepend_comma)
      StringAppendF(out, ",");
    StringAppendF(out, "%s%s", (included ? "" : "-"), ci->c_str());
    ++token_cnt;
  }
}

void CategoryFilter::WriteString(const StringList& delays,
                                 std::string* out) const {
  bool prepend_comma = !out->empty();
  int token_cnt = 0;
  for (StringList::const_iterator ci = delays.begin();
       ci != delays.end(); ++ci) {
    if (token_cnt > 0 || prepend_comma)
      StringAppendF(out, ",");
    StringAppendF(out, "%s%s)", kSyntheticDelayCategoryFilterPrefix,
                  ci->c_str());
    ++token_cnt;
  }
}

std::string CategoryFilter::ToString() const {
  std::string filter_string;
  WriteString(included_, &filter_string, true);
  WriteString(disabled_, &filter_string, true);
  WriteString(excluded_, &filter_string, false);
  WriteString(delays_, &filter_string);
  return filter_string;
}
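// Example (illustrative) of the precedence implemented below, for the
// filter "gpu,-ipc":
//   IsCategoryGroupEnabled("gpu,cc") -> true   (group contains included "gpu")
//   IsCategoryGroupEnabled("ipc")    -> false  (group contains excluded "ipc")
//   IsCategoryGroupEnabled("cc")     -> false  (included list is non-empty)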
bool CategoryFilter::IsCategoryGroupEnabled(
    const char* category_group_name) const {
  // TraceLog should call this method only as part of enabling/disabling
  // categories.
  StringList::const_iterator ci;

  // Check the disabled- filters and the disabled-* wildcard first so that a
  // "*" filter does not include the disabled.
  for (ci = disabled_.begin(); ci != disabled_.end(); ++ci) {
    if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
      return true;
  }
  if (DoesCategoryGroupContainCategory(category_group_name,
                                       TRACE_DISABLED_BY_DEFAULT("*")))
    return false;

  for (ci = included_.begin(); ci != included_.end(); ++ci) {
    if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
      return true;
  }

  for (ci = excluded_.begin(); ci != excluded_.end(); ++ci) {
    if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
      return false;
  }
  // If the category group is not excluded, and there are no included
  // patterns, we consider this category group enabled.
  return included_.empty();
}

bool CategoryFilter::HasIncludedPatterns() const {
  return !included_.empty();
}
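// For example (illustrative), merging a filter whose included list is "gpu"
// into one with no included patterns (an implicit "*") clears included_, so
// the broader "everything" filter wins after the merge.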
void CategoryFilter::Merge(const CategoryFilter& nested_filter) {
  // Keep included patterns only if both filters have an included entry.
  // Otherwise, one of the filters was specifying "*" and we want to honour
  // the broadest filter.
  if (HasIncludedPatterns() && nested_filter.HasIncludedPatterns()) {
    included_.insert(included_.end(),
                     nested_filter.included_.begin(),
                     nested_filter.included_.end());
  } else {
    included_.clear();
  }

  disabled_.insert(disabled_.end(),
                   nested_filter.disabled_.begin(),
                   nested_filter.disabled_.end());
  excluded_.insert(excluded_.end(),
                   nested_filter.excluded_.begin(),
                   nested_filter.excluded_.end());
  delays_.insert(delays_.end(),
                 nested_filter.delays_.begin(),
                 nested_filter.delays_.end());
}

void CategoryFilter::Clear() {
  included_.clear();
  disabled_.clear();
  excluded_.clear();
  delays_.clear();
}

const CategoryFilter::StringList&
    CategoryFilter::GetSyntheticDelayValues() const {
  return delays_;
}

}  // namespace debug
}  // namespace base

namespace trace_event_internal {

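// ScopedTraceBinaryEfficient is presumably the backing for the
// binary-efficient TRACE_EVENT macro family: a lightweight scoped COMPLETE
// event. The single cached atomic below works only because the category
// group is currently restricted to "gpu".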
ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient(
    const char* category_group, const char* name) {
  // The single atom works because for now the category_group can only be
  // "gpu".
  DCHECK(strcmp(category_group, "gpu") == 0);
  static TRACE_EVENT_API_ATOMIC_WORD atomic = 0;
  INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(
      category_group, atomic, category_group_enabled_);
  name_ = name;
  if (*category_group_enabled_) {
    event_handle_ =
        TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
            TRACE_EVENT_PHASE_COMPLETE, category_group_enabled_, name,
            trace_event_internal::kNoEventId,
            static_cast<int>(base::PlatformThread::CurrentId()),
            base::TimeTicks::NowFromSystemTraceTime(),
            0, NULL, NULL, NULL, NULL, TRACE_EVENT_FLAG_NONE);
  }
}

ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
  if (*category_group_enabled_) {
    TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_,
                                                name_, event_handle_);
  }
}

}  // namespace trace_event_internal