1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "base/debug/trace_event_impl.h"
9 #include "base/base_switches.h"
10 #include "base/bind.h"
11 #include "base/command_line.h"
12 #include "base/debug/leak_annotations.h"
13 #include "base/debug/trace_event.h"
14 #include "base/format_macros.h"
15 #include "base/lazy_instance.h"
16 #include "base/memory/singleton.h"
17 #include "base/message_loop/message_loop.h"
18 #include "base/process/process_metrics.h"
19 #include "base/stl_util.h"
20 #include "base/strings/string_number_conversions.h"
21 #include "base/strings/string_split.h"
22 #include "base/strings/string_tokenizer.h"
23 #include "base/strings/string_util.h"
24 #include "base/strings/stringprintf.h"
25 #include "base/strings/utf_string_conversions.h"
26 #include "base/synchronization/cancellation_flag.h"
27 #include "base/synchronization/waitable_event.h"
28 #include "base/sys_info.h"
29 #include "base/third_party/dynamic_annotations/dynamic_annotations.h"
30 #include "base/threading/platform_thread.h"
31 #include "base/threading/thread_id_name_manager.h"
32 #include "base/time/time.h"
35 #include "base/debug/trace_event_win.h"
// Test-only hook that destroys the leaky TraceLog singleton so tests can
// start from a clean state.
// NOTE(review): the listing's left-column numbering skips values, so lines
// of this class (e.g. its closing brace) are elided from this view.
38 class DeleteTraceLogForTesting {
40 static void Delete() {
41 Singleton<base::debug::TraceLog,
42 LeakySingletonTraits<base::debug::TraceLog> >::OnExit(0);
46 // The thread buckets for the sampling profiler.
47 BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
54 // The overhead of TraceEvent above this threshold will be reported in the
// trace as an "overhead" event (see ThreadLocalEventBuffer::ReportOverhead,
// which compares against this value and emits a TRACE_EVENT_PHASE_COMPLETE
// event when exceeded).
56 const int kOverheadReportThresholdInMicroseconds = 50;
58 // Controls the number of trace events we will buffer in-memory
59 // before throwing them away.
60 const size_t kTraceBufferChunkSize = TraceBufferChunk::kTraceBufferChunkSize;
61 const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize;
// Ring-buffer mode keeps a quarter of the vector-mode capacity.
62 const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4;
63 const size_t kTraceEventBatchChunks = 1000 / kTraceBufferChunkSize;
64 // Can store results for 30 seconds with 1 ms sampling interval.
65 const size_t kMonitorTraceEventBufferChunks = 30000 / kTraceBufferChunkSize;
66 // ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
67 const size_t kEchoToConsoleTraceEventBufferChunks = 256;
// Timeout, in milliseconds, presumably used when waiting for per-thread
// buffers to flush — TODO confirm against the (elided) Flush implementation.
69 const int kThreadFlushTimeoutMs = 3000;
71 #define MAX_CATEGORY_GROUPS 100
73 // Parallel arrays g_category_groups and g_category_group_enabled are separate
74 // so that a pointer to a member of g_category_group_enabled can be easily
75 // converted to an index into g_category_groups. This allows macros to deal
76 // only with char enabled pointers from g_category_group_enabled, and we can
77 // convert internally to determine the category name from the char enabled
// pointer.
79 const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
80 "tracing already shutdown",
81 "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
// NOTE(review): the entry for index 2 (g_category_metadata, original line 82)
// is elided from this listing; only indices 0, 1 and 3 are visible.
83 // For reporting trace_event overhead. For thread local event buffers only.
84 "trace_event_overhead"};
86 // The enabled flag is char instead of bool so that the API can be used from C.
87 unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = { 0 };
// Well-known indices into the parallel arrays above.
88 const int g_category_already_shutdown = 0;
89 const int g_category_categories_exhausted = 1;
90 const int g_category_metadata = 2;
91 const int g_category_trace_event_overhead = 3;
92 const int g_num_builtin_categories = 4;
93 int g_category_index = g_num_builtin_categories; // Skip default categories.
95 // The name of the current thread. This is used to decide if the current
96 // thread name has changed. We combine all the seen thread names into the
97 // output name for the thread.
98 LazyInstance<ThreadLocalPointer<const char> >::Leaky
99 g_current_thread_name = LAZY_INSTANCE_INITIALIZER;
// String values accepted by TraceOptionsFromString() below.
101 const char kRecordUntilFull[] = "record-until-full";
102 const char kRecordContinuously[] = "record-continuously";
103 const char kEnableSampling[] = "enable-sampling";
104 const char kMonitorSampling[] = "monitor-sampling";
// Returns the per-thread CPU clock when the platform supports it, otherwise a
// null TimeTicks so callers can detect the absence of thread time.
106 TimeTicks ThreadNow() {
107 return TimeTicks::IsThreadNowSupported() ?
108 TimeTicks::ThreadNow() : TimeTicks();
// TraceBuffer implementation that recycles chunks in ring-buffer fashion:
// chunk slot indices circulate through |recyclable_chunks_queue_|, so once
// the buffer is full the oldest returned chunk is handed out again.
// NOTE(review): several lines are elided from this listing (left-column
// numbering skips values) — e.g. the reuse-vs-allocate branch in GetChunk()
// and the bodies of IsFull()/NextQueueIndex(). Code kept byte-identical.
111 class TraceBufferRingBuffer : public TraceBuffer {
113 TraceBufferRingBuffer(size_t max_chunks)
114 : max_chunks_(max_chunks),
115 recyclable_chunks_queue_(new size_t[queue_capacity()]),
117 queue_tail_(max_chunks),
118 current_iteration_index_(0),
119 current_chunk_seq_(1) {
120 chunks_.reserve(max_chunks);
// Initially all chunk slots [0, max_chunks) are recyclable.
121 for (size_t i = 0; i < max_chunks; ++i)
122 recyclable_chunks_queue_[i] = i;
// Hands out the next recyclable chunk; its slot in |chunks_| is set to NULL
// to mark the chunk as in-flight until ReturnChunk() is called.
125 virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
126 // Because the number of threads is much less than the number of chunks,
127 // the queue should never be empty.
128 DCHECK(!QueueIsEmpty());
130 *index = recyclable_chunks_queue_[queue_head_];
131 queue_head_ = NextQueueIndex(queue_head_);
132 current_iteration_index_ = queue_head_;
134 if (*index >= chunks_.size())
135 chunks_.resize(*index + 1);
137 TraceBufferChunk* chunk = chunks_[*index];
138 chunks_[*index] = NULL; // Put NULL in the slot of a in-flight chunk.
// Existing chunks are reset with a fresh sequence number; otherwise a new
// chunk is allocated (the branching lines are elided in this listing).
140 chunk->Reset(current_chunk_seq_++);
142 chunk = new TraceBufferChunk(current_chunk_seq_++);
144 return scoped_ptr<TraceBufferChunk>(chunk);
// Takes ownership of a previously handed-out chunk and makes its slot
// recyclable again.
147 virtual void ReturnChunk(size_t index,
148 scoped_ptr<TraceBufferChunk> chunk) OVERRIDE {
149 // When this method is called, the queue should not be full because it
150 // can contain all chunks including the one to be returned.
151 DCHECK(!QueueIsFull());
153 DCHECK_LT(index, chunks_.size());
154 DCHECK(!chunks_[index]);
155 chunks_[index] = chunk.release();
156 recyclable_chunks_queue_[queue_tail_] = index;
157 queue_tail_ = NextQueueIndex(queue_tail_);
160 virtual bool IsFull() const OVERRIDE {
164 virtual size_t Size() const OVERRIDE {
165 // This is approximate because not all of the chunks are full.
166 return chunks_.size() * kTraceBufferChunkSize;
169 virtual size_t Capacity() const OVERRIDE {
170 return max_chunks_ * kTraceBufferChunkSize;
// Resolves a handle to an event; returns via the (elided) early-return paths
// when the chunk no longer exists or has been recycled (seq mismatch).
173 virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
174 if (handle.chunk_index >= chunks_.size())
176 TraceBufferChunk* chunk = chunks_[handle.chunk_index];
177 if (!chunk || chunk->seq() != handle.chunk_seq)
179 return chunk->GetEventAt(handle.event_index);
// Iterates chunks in queue (oldest-first) order.
182 virtual const TraceBufferChunk* NextChunk() OVERRIDE {
186 while (current_iteration_index_ != queue_tail_) {
187 size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_];
188 current_iteration_index_ = NextQueueIndex(current_iteration_index_);
189 if (chunk_index >= chunks_.size()) // Skip uninitialized chunks.
191 DCHECK(chunks_[chunk_index]);
192 return chunks_[chunk_index];
// Deep-copies the resident chunks into a read-only ClonedTraceBuffer so the
// live buffer can keep accepting events during iteration.
197 virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
198 scoped_ptr<ClonedTraceBuffer> cloned_buffer(new ClonedTraceBuffer());
199 for (size_t queue_index = queue_head_; queue_index != queue_tail_;
200 queue_index = NextQueueIndex(queue_index)) {
201 size_t chunk_index = recyclable_chunks_queue_[queue_index];
202 if (chunk_index >= chunks_.size()) // Skip uninitialized chunks.
204 TraceBufferChunk* chunk = chunks_[chunk_index];
// In-flight chunks are NULL in |chunks_| and are cloned as NULL placeholders.
205 cloned_buffer->chunks_.push_back(chunk ? chunk->Clone().release() : NULL);
207 return cloned_buffer.PassAs<TraceBuffer>();
// Read-only snapshot buffer produced by CloneForIteration(). Only NextChunk()
// does real work; all mutation entry points are stubbed out.
211 class ClonedTraceBuffer : public TraceBuffer {
213 ClonedTraceBuffer() : current_iteration_index_(0) {}
215 // The only implemented method.
216 virtual const TraceBufferChunk* NextChunk() OVERRIDE {
217 return current_iteration_index_ < chunks_.size() ?
218 chunks_[current_iteration_index_++] : NULL;
221 virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
223 return scoped_ptr<TraceBufferChunk>();
225 virtual void ReturnChunk(size_t index,
226 scoped_ptr<TraceBufferChunk>) OVERRIDE {
229 virtual bool IsFull() const OVERRIDE { return false; }
230 virtual size_t Size() const OVERRIDE { return 0; }
231 virtual size_t Capacity() const OVERRIDE { return 0; }
232 virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
235 virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
237 return scoped_ptr<TraceBuffer>();
240 size_t current_iteration_index_;
241 ScopedVector<TraceBufferChunk> chunks_;
// --- Circular-queue helpers over |recyclable_chunks_queue_| ---
244 bool QueueIsEmpty() const {
245 return queue_head_ == queue_tail_;
246 size_t QueueSize() const {
249 return queue_tail_ > queue_head_ ? queue_tail_ - queue_head_ :
250 queue_tail_ + queue_capacity() - queue_head_;
253 bool QueueIsFull() const {
254 return QueueSize() == queue_capacity() - 1;
257 size_t queue_capacity() const {
258 // One extra space to help distinguish full state and empty state.
259 return max_chunks_ + 1;
// Advances an index with wrap-around (increment line is elided here).
262 size_t NextQueueIndex(size_t index) const {
264 if (index >= queue_capacity())
270 ScopedVector<TraceBufferChunk> chunks_;
272 scoped_ptr<size_t[]> recyclable_chunks_queue_;
276 size_t current_iteration_index_;
277 uint32 current_chunk_seq_;
279 DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer);
// TraceBuffer implementation that grows a simple vector of chunks and never
// recycles them; used for the record-until-full style modes (chunks are
// dropped only when the whole log is discarded).
282 class TraceBufferVector : public TraceBuffer {
285 : in_flight_chunk_count_(0),
286 current_iteration_index_(0) {
287 chunks_.reserve(kTraceEventVectorBufferChunks);
290 virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
291 // This function may be called when adding normal events or indirectly from
292 // AddMetadataEventsWhileLocked(). We can not DECHECK(!IsFull()) because we
293 // have to add the metadata events and flush thread-local buffers even if
294 // the buffer is full.
295 *index = chunks_.size();
296 chunks_.push_back(NULL); // Put NULL in the slot of a in-flight chunk.
297 ++in_flight_chunk_count_;
298 // + 1 because zero chunk_seq is not allowed.
299 return scoped_ptr<TraceBufferChunk>(
300 new TraceBufferChunk(static_cast<uint32>(*index) + 1));
// Re-installs a chunk handed out by GetChunk() into its reserved slot.
303 virtual void ReturnChunk(size_t index,
304 scoped_ptr<TraceBufferChunk> chunk) OVERRIDE {
305 DCHECK_GT(in_flight_chunk_count_, 0u);
306 DCHECK_LT(index, chunks_.size());
307 DCHECK(!chunks_[index]);
308 --in_flight_chunk_count_;
309 chunks_[index] = chunk.release();
312 virtual bool IsFull() const OVERRIDE {
313 return chunks_.size() >= kTraceEventVectorBufferChunks;
316 virtual size_t Size() const OVERRIDE {
317 // This is approximate because not all of the chunks are full.
318 return chunks_.size() * kTraceBufferChunkSize;
321 virtual size_t Capacity() const OVERRIDE {
322 return kTraceEventVectorBufferChunks * kTraceBufferChunkSize;
// Same handle-resolution scheme as the ring buffer: NULL slot or a sequence
// mismatch means the handle is stale (early returns elided in this listing).
325 virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
326 if (handle.chunk_index >= chunks_.size())
328 TraceBufferChunk* chunk = chunks_[handle.chunk_index];
329 if (!chunk || chunk->seq() != handle.chunk_seq)
331 return chunk->GetEventAt(handle.event_index);
334 virtual const TraceBufferChunk* NextChunk() OVERRIDE {
335 while (current_iteration_index_ < chunks_.size()) {
336 // Skip in-flight chunks.
337 const TraceBufferChunk* chunk = chunks_[current_iteration_index_++];
// Cloning is not supported for this buffer type; an empty pointer is
// returned (the NOTIMPLEMENTED-style line is elided in this listing).
344 virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
346 return scoped_ptr<TraceBuffer>();
350 size_t in_flight_chunk_count_;
351 size_t current_iteration_index_;
352 ScopedVector<TraceBufferChunk> chunks_;
354 DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
// Fills |trace_event| in as a TRACE_EVENT_PHASE_METADATA event named
// |metadata_name| carrying a single argument |arg_name| whose value is
// converted through SetTraceValue().
// NOTE(review): the parameter lines for |thread_id|/|value|/|num_args| and a
// likely NULL-check on |trace_event| (original lines 359, 361-365) are elided
// from this listing.
357 template <typename T>
358 void InitializeMetadataEvent(TraceEvent* trace_event,
360 const char* metadata_name, const char* arg_name,
366 unsigned char arg_type;
367 unsigned long long arg_value;
368 ::trace_event_internal::SetTraceValue(value, &arg_type, &arg_value);
369 trace_event->Initialize(thread_id,
370 TimeTicks(), TimeTicks(), TRACE_EVENT_PHASE_METADATA,
371 &g_category_group_enabled[g_category_metadata],
372 metadata_name, ::trace_event_internal::kNoEventId,
373 num_args, &arg_name, &arg_type, &arg_value, NULL,
374 TRACE_EVENT_FLAG_NONE);
// Prepares this chunk for reuse under a new sequence number by resetting the
// events written so far (the per-event Reset call and counter/seq assignment
// lines are elided from this listing).
379 void TraceBufferChunk::Reset(uint32 new_seq) {
380 for (size_t i = 0; i < next_free_; ++i)
// Claims the next free slot in the chunk and returns a pointer to it,
// reporting the slot's index through |event_index|.
386 TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) {
388 *event_index = next_free_++;
389 return &chunk_[*event_index];
// Deep-copies the used portion of this chunk (same seq) for snapshotting.
392 scoped_ptr<TraceBufferChunk> TraceBufferChunk::Clone() const {
393 scoped_ptr<TraceBufferChunk> cloned_chunk(new TraceBufferChunk(seq_));
394 cloned_chunk->next_free_ = next_free_;
395 for (size_t i = 0; i < next_free_; ++i)
396 cloned_chunk->chunk_[i].CopyFrom(chunk_[i]);
397 return cloned_chunk.Pass();
400 // A helper class that allows the lock to be acquired in the middle of the scope
401 // and unlocks at the end of scope if locked.
// NOTE(review): the member lines and the bodies of the methods below
// (tracking a locked_ flag, Acquire/Release calls) are elided in this listing.
402 class TraceLog::OptionalAutoLock {
404 explicit OptionalAutoLock(Lock& lock)
409 ~OptionalAutoLock() {
// Acquires the lock if it has not been acquired yet in this scope.
414 void EnsureAcquired() {
// Releases the lock if it is currently held by this scope.
421 void EnsureReleased() {
431 DISALLOW_COPY_AND_ASSIGN(OptionalAutoLock);
434 // Use this function instead of TraceEventHandle constructor to keep the
435 // overhead of ScopedTracer (trace_event.h) constructor minimum.
// The chunk and event indices are packed into 16-bit fields, hence the
// range DCHECKs below.
436 void MakeHandle(uint32 chunk_seq, size_t chunk_index, size_t event_index,
437 TraceEventHandle* handle) {
439 DCHECK(chunk_index < (1u << 16));
440 DCHECK(event_index < (1u << 16));
441 handle->chunk_seq = chunk_seq;
442 handle->chunk_index = static_cast<uint16>(chunk_index);
443 handle->event_index = static_cast<uint16>(event_index);
446 ////////////////////////////////////////////////////////////////////////////////
450 ////////////////////////////////////////////////////////////////////////////////
// Returns the number of bytes needed to store |str| including its
// terminating NUL, or 0 when |str| is NULL (nothing needs to be copied).
size_t GetAllocLength(const char* str) { return str ? strlen(str) + 1 : 0; }
456 // Copies |*member| into |*buffer|, sets |*member| to point to this new
457 // location, and then advances |*buffer| by the amount written.
// |written| counts the terminating NUL (+1), matching GetAllocLength().
// NOTE(review): the remaining parameter lines and the pointer-advance /
// member-reassignment statements (original lines 459-461, 464+) are elided.
458 void CopyTraceEventParameter(char** buffer,
462 size_t written = strlcpy(*buffer, *member, end - *buffer) + 1;
463 DCHECK_LE(static_cast<int>(written), end - *buffer);
// Default-constructs an empty event. duration_ == -1 is the sentinel meaning
// "not yet completed" (see UpdateDuration's DCHECK below).
471 TraceEvent::TraceEvent()
472 : duration_(TimeDelta::FromInternalValue(-1)),
474 category_group_enabled_(NULL),
477 phase_(TRACE_EVENT_PHASE_BEGIN),
479 for (int i = 0; i < kTraceMaxNumArgs; ++i)
480 arg_names_[i] = NULL;
481 memset(arg_values_, 0, sizeof(arg_values_));
484 TraceEvent::~TraceEvent() {
// Field-by-field copy used when cloning chunks for snapshot iteration.
// parameter_copy_storage_ and convertable_values_ are ref-counted, so the
// copy shares ownership rather than duplicating the underlying data.
487 void TraceEvent::CopyFrom(const TraceEvent& other) {
488 timestamp_ = other.timestamp_;
489 thread_timestamp_ = other.thread_timestamp_;
490 duration_ = other.duration_;
492 category_group_enabled_ = other.category_group_enabled_;
494 thread_id_ = other.thread_id_;
495 phase_ = other.phase_;
496 flags_ = other.flags_;
497 parameter_copy_storage_ = other.parameter_copy_storage_;
499 for (int i = 0; i < kTraceMaxNumArgs; ++i) {
500 arg_names_[i] = other.arg_names_[i];
501 arg_types_[i] = other.arg_types_[i];
502 arg_values_[i] = other.arg_values_[i];
503 convertable_values_[i] = other.convertable_values_[i];
// (Re)initializes every field of the event. When TRACE_EVENT_FLAG_COPY is
// set, the name, argument names and string argument values are copied into a
// single ref-counted allocation (|parameter_copy_storage_|) so the event does
// not dangle on caller-owned strings.
// NOTE(review): several lines are elided in this listing (e.g. the |phase|/
// |name|/|num_args| parameter lines, the declaration of |i|, and the
// copy-phase guards), so the visible control flow is incomplete.
507 void TraceEvent::Initialize(
510 TimeTicks thread_timestamp,
512 const unsigned char* category_group_enabled,
514 unsigned long long id,
516 const char** arg_names,
517 const unsigned char* arg_types,
518 const unsigned long long* arg_values,
519 const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
520 unsigned char flags) {
521 timestamp_ = timestamp;
522 thread_timestamp_ = thread_timestamp;
523 duration_ = TimeDelta::FromInternalValue(-1);
525 category_group_enabled_ = category_group_enabled;
527 thread_id_ = thread_id;
531 // Clamp num_args since it may have been set by a third_party library.
532 num_args = (num_args > kTraceMaxNumArgs) ? kTraceMaxNumArgs : num_args;
// Copy the provided arguments into the fixed-size member arrays.
534 for (; i < num_args; ++i) {
535 arg_names_[i] = arg_names[i];
536 arg_types_[i] = arg_types[i];
538 if (arg_types[i] == TRACE_VALUE_TYPE_CONVERTABLE)
539 convertable_values_[i] = convertable_values[i];
541 arg_values_[i].as_uint = arg_values[i];
// Zero out unused argument slots.
543 for (; i < kTraceMaxNumArgs; ++i) {
544 arg_names_[i] = NULL;
545 arg_values_[i].as_uint = 0u;
546 convertable_values_[i] = NULL;
547 arg_types_[i] = TRACE_VALUE_TYPE_UINT;
// First pass: size the single allocation that will hold all copied strings.
550 bool copy = !!(flags & TRACE_EVENT_FLAG_COPY);
551 size_t alloc_size = 0;
553 alloc_size += GetAllocLength(name);
554 for (i = 0; i < num_args; ++i) {
555 alloc_size += GetAllocLength(arg_names_[i]);
// With COPY semantics, plain STRING args are promoted to COPY_STRING.
556 if (arg_types_[i] == TRACE_VALUE_TYPE_STRING)
557 arg_types_[i] = TRACE_VALUE_TYPE_COPY_STRING;
561 bool arg_is_copy[kTraceMaxNumArgs];
562 for (i = 0; i < num_args; ++i) {
563 // No copying of convertable types, we retain ownership.
564 if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
567 // We only take a copy of arg_vals if they are of type COPY_STRING.
568 arg_is_copy[i] = (arg_types_[i] == TRACE_VALUE_TYPE_COPY_STRING);
570 alloc_size += GetAllocLength(arg_values_[i].as_string);
// Second pass: copy everything into one RefCountedString and repoint the
// member char* fields at it.
574 parameter_copy_storage_ = new RefCountedString;
575 parameter_copy_storage_->data().resize(alloc_size);
576 char* ptr = string_as_array(&parameter_copy_storage_->data());
577 const char* end = ptr + alloc_size;
579 CopyTraceEventParameter(&ptr, &name_, end);
580 for (i = 0; i < num_args; ++i) {
581 CopyTraceEventParameter(&ptr, &arg_names_[i], end);
584 for (i = 0; i < num_args; ++i) {
585 if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
588 CopyTraceEventParameter(&ptr, &arg_values_[i].as_string, end);
// Both passes must agree on the total size, or we over/under-ran the buffer.
590 DCHECK_EQ(end, ptr) << "Overrun by " << ptr - end;
594 void TraceEvent::Reset() {
595 // Only reset fields that won't be initialized in Initialize(), or that may
596 // hold references to other objects.
597 duration_ = TimeDelta::FromInternalValue(-1);
598 parameter_copy_storage_ = NULL;
599 for (int i = 0; i < kTraceMaxNumArgs; ++i)
600 convertable_values_[i] = NULL;
// Closes a COMPLETE event: may only be called once per event (the -1
// sentinel set in Initialize()/Reset() guards against double completion).
603 void TraceEvent::UpdateDuration(const TimeTicks& now) {
604 DCHECK(duration_.ToInternalValue() == -1);
605 duration_ = now - timestamp_;
// Serializes a single trace argument value of the given |type| as JSON and
// appends it to |*out|. Pointers are emitted as quoted hex strings and
// strings are backslash-escaped; break statements between cases are elided
// in this listing.
609 void TraceEvent::AppendValueAsJSON(unsigned char type,
610 TraceEvent::TraceValue value,
612 std::string::size_type start_pos;
614 case TRACE_VALUE_TYPE_BOOL:
615 *out += value.as_bool ? "true" : "false";
617 case TRACE_VALUE_TYPE_UINT:
618 StringAppendF(out, "%" PRIu64, static_cast<uint64>(value.as_uint));
620 case TRACE_VALUE_TYPE_INT:
621 StringAppendF(out, "%" PRId64, static_cast<int64>(value.as_int));
623 case TRACE_VALUE_TYPE_DOUBLE: {
624 // FIXME: base/json/json_writer.cc is using the same code,
625 // should be made into a common method.
626 std::string real = DoubleToString(value.as_double);
627 // Ensure that the number has a .0 if there's no decimal or 'e'. This
628 // makes sure that when we read the JSON back, it's interpreted as a
629 // real rather than an int.
630 if (real.find('.') == std::string::npos &&
631 real.find('e') == std::string::npos &&
632 real.find('E') == std::string::npos) {
635 // The JSON spec requires that non-integer values in the range (-1,1)
636 // have a zero before the decimal point - ".52" is not valid, "0.52" is.
637 if (real[0] == '.') {
639 } else if (real.length() > 1 && real[0] == '-' && real[1] == '.') {
640 // "-.1" bad "-0.1" good
644 StringAppendF(out, "%s", real.c_str());
647 case TRACE_VALUE_TYPE_POINTER:
648 // JSON only supports double and int numbers.
649 // So as not to lose bits from a 64-bit pointer, output as a hex string.
650 StringAppendF(out, "\"0x%" PRIx64 "\"", static_cast<uint64>(
651 reinterpret_cast<intptr_t>(
654 case TRACE_VALUE_TYPE_STRING:
655 case TRACE_VALUE_TYPE_COPY_STRING:
657 start_pos = out->size();
658 *out += value.as_string ? value.as_string : "NULL";
659 // insert backslash before special characters for proper json format.
660 while ((start_pos = out->find_first_of("\\\"", start_pos)) !=
662 out->insert(start_pos, 1, '\\');
663 // skip inserted escape character and following character.
669 NOTREACHED() << "Don't know how to print this value";
// Serializes this event as one JSON object in the about:tracing trace format
// (cat/pid/tid/ts/ph/name/args, plus optional dur, tts, id and scope fields).
674 void TraceEvent::AppendAsJSON(std::string* out) const {
675 int64 time_int64 = timestamp_.ToInternalValue();
676 int process_id = TraceLog::GetInstance()->process_id();
677 // Category group checked at category creation time.
678 DCHECK(!strchr(name_, '"'));
680 "{\"cat\":\"%s\",\"pid\":%i,\"tid\":%i,\"ts\":%" PRId64 ","
681 "\"ph\":\"%c\",\"name\":\"%s\",\"args\":{",
682 TraceLog::GetCategoryGroupName(category_group_enabled_),
689 // Output argument names and values, stop at first NULL argument name.
690 for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
694 *out += arg_names_[i];
// Convertable arguments serialize themselves; plain values go through
// AppendValueAsJSON.
697 if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
698 convertable_values_[i]->AppendAsTraceFormat(out);
700 AppendValueAsJSON(arg_types_[i], arg_values_[i], out);
// COMPLETE events carry an explicit duration field.
704 if (phase_ == TRACE_EVENT_PHASE_COMPLETE) {
705 int64 duration = duration_.ToInternalValue();
707 StringAppendF(out, ",\"dur\":%" PRId64, duration);
710 // Output tts if thread_timestamp is valid.
711 if (!thread_timestamp_.is_null()) {
712 int64 thread_time_int64 = thread_timestamp_.ToInternalValue();
713 StringAppendF(out, ",\"tts\":%" PRId64, thread_time_int64);
716 // If id_ is set, print it out as a hex string so we don't loose any
717 // bits (it might be a 64-bit pointer).
718 if (flags_ & TRACE_EVENT_FLAG_HAS_ID)
719 StringAppendF(out, ",\"id\":\"0x%" PRIx64 "\"", static_cast<uint64>(id_));
721 // Instant events also output their scope.
722 if (phase_ == TRACE_EVENT_PHASE_INSTANT) {
724 switch (flags_ & TRACE_EVENT_FLAG_SCOPE_MASK) {
725 case TRACE_EVENT_SCOPE_GLOBAL:
726 scope = TRACE_EVENT_SCOPE_NAME_GLOBAL;
729 case TRACE_EVENT_SCOPE_PROCESS:
730 scope = TRACE_EVENT_SCOPE_NAME_PROCESS;
733 case TRACE_EVENT_SCOPE_THREAD:
734 scope = TRACE_EVENT_SCOPE_NAME_THREAD;
737 StringAppendF(out, ",\"s\":\"%c\"", scope);
// Human-readable rendering ("name[category] arg:value, ...") used by the
// ECHO_TO_CONSOLE path; argument values reuse the JSON value serializer.
743 void TraceEvent::AppendPrettyPrinted(std::ostringstream* out) const {
744 *out << name_ << "[";
745 *out << TraceLog::GetCategoryGroupName(category_group_enabled_);
749 for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
752 *out << arg_names_[i] << ":";
753 std::string value_as_text;
755 if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
756 convertable_values_[i]->AppendAsTraceFormat(&value_as_text);
758 AppendValueAsJSON(arg_types_[i], arg_values_[i], &value_as_text);
760 *out << value_as_text;
766 ////////////////////////////////////////////////////////////////////////////////
770 ////////////////////////////////////////////////////////////////////////////////
// SimpleOutput accumulates all emitted JSON fragments into one std::string.
772 TraceResultBuffer::OutputCallback
773 TraceResultBuffer::SimpleOutput::GetCallback() {
774 return Bind(&SimpleOutput::Append, Unretained(this));
777 void TraceResultBuffer::SimpleOutput::Append(
778 const std::string& json_trace_output) {
779 json_output += json_trace_output;
782 TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {
785 TraceResultBuffer::~TraceResultBuffer() {
788 void TraceResultBuffer::SetOutputCallback(
789 const OutputCallback& json_chunk_callback) {
790 output_callback_ = json_chunk_callback;
// Emits the opening '[' of the JSON array; fragments added afterwards are
// comma-separated via |append_comma_|.
793 void TraceResultBuffer::Start() {
794 append_comma_ = false;
795 output_callback_.Run("[");
798 void TraceResultBuffer::AddFragment(const std::string& trace_fragment) {
// NOTE(review): the guard line `if (append_comma_)` (original line 799) is
// elided from this listing — as shown, the comma write looks unconditional,
// which contradicts resetting append_comma_ in Start(). Verify upstream.
800 output_callback_.Run(",");
801 append_comma_ = true;
802 output_callback_.Run(trace_fragment);
// Emits the closing ']' of the JSON array.
805 void TraceResultBuffer::Finish() {
806 output_callback_.Run("]");
809 ////////////////////////////////////////////////////////////////////////////////
811 // TraceSamplingThread
813 ////////////////////////////////////////////////////////////////////////////////
814 class TraceBucketData;
815 typedef base::Callback<void(TraceBucketData*)> TraceSampleCallback;
// Associates one sampling-profiler atomic bucket with its name and the
// callback invoked on every sampling tick.
817 class TraceBucketData {
819 TraceBucketData(base::subtle::AtomicWord* bucket,
821 TraceSampleCallback callback);
824 TRACE_EVENT_API_ATOMIC_WORD* bucket;
825 const char* bucket_name;
826 TraceSampleCallback callback;
829 // This object must be created on the IO thread.
// Background thread that periodically reads the sampling buckets and turns
// their contents into SAMPLE trace events.
830 class TraceSamplingThread : public PlatformThread::Delegate {
832 TraceSamplingThread();
833 virtual ~TraceSamplingThread();
835 // Implementation of PlatformThread::Delegate:
836 virtual void ThreadMain() OVERRIDE;
838 static void DefaultSamplingCallback(TraceBucketData* bucekt_data);
// Test hook: the installed event is signaled once per sampling iteration.
841 void InstallWaitableEventForSamplingTesting(WaitableEvent* waitable_event);
844 friend class TraceLog;
847 // Not thread-safe. Once the ThreadMain has been called, this can no longer
// be called.
849 void RegisterSampleBucket(TRACE_EVENT_API_ATOMIC_WORD* bucket,
850 const char* const name,
851 TraceSampleCallback callback);
852 // Splits a combined "category\0name" into the two component parts.
853 static void ExtractCategoryAndName(const char* combined,
854 const char** category,
856 std::vector<TraceBucketData> sample_buckets_;
857 bool thread_running_;
858 scoped_ptr<CancellationFlag> cancellation_flag_;
859 scoped_ptr<WaitableEvent> waitable_event_for_testing_;
863 TraceSamplingThread::TraceSamplingThread()
864 : thread_running_(false) {
865 cancellation_flag_.reset(new CancellationFlag);
868 TraceSamplingThread::~TraceSamplingThread() {
// Sampling loop: sleeps ~1ms per iteration until Stop() sets the
// cancellation flag. The GetSamples() call inside the loop is elided from
// this listing.
871 void TraceSamplingThread::ThreadMain() {
872 PlatformThread::SetName("Sampling Thread");
873 thread_running_ = true;
874 const int kSamplingFrequencyMicroseconds = 1000;
875 while (!cancellation_flag_->IsSet()) {
876 PlatformThread::Sleep(
877 TimeDelta::FromMicroseconds(kSamplingFrequencyMicroseconds));
879 if (waitable_event_for_testing_.get())
880 waitable_event_for_testing_->Signal();
// Reads the bucket's atomic word (a "category\0name" pointer published by
// the sampled thread) and records a SAMPLE trace event from it.
885 void TraceSamplingThread::DefaultSamplingCallback(
886 TraceBucketData* bucket_data) {
887 TRACE_EVENT_API_ATOMIC_WORD category_and_name =
888 TRACE_EVENT_API_ATOMIC_LOAD(*bucket_data->bucket);
889 if (!category_and_name)
891 const char* const combined =
892 reinterpret_cast<const char* const>(category_and_name);
893 const char* category_group;
895 ExtractCategoryAndName(combined, &category_group, &name);
896 TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_SAMPLE,
897 TraceLog::GetCategoryGroupEnabled(category_group),
898 name, 0, 0, NULL, NULL, NULL, NULL, 0);
// Runs every registered bucket's callback once.
901 void TraceSamplingThread::GetSamples() {
902 for (size_t i = 0; i < sample_buckets_.size(); ++i) {
903 TraceBucketData* bucket_data = &sample_buckets_[i];
904 bucket_data->callback.Run(bucket_data);
908 void TraceSamplingThread::RegisterSampleBucket(
909 TRACE_EVENT_API_ATOMIC_WORD* bucket,
910 const char* const name,
911 TraceSampleCallback callback) {
// Registration must happen before the thread starts (see decl comment).
912 DCHECK(!thread_running_);
913 sample_buckets_.push_back(TraceBucketData(bucket, name, callback));
// |combined| holds "category\0name"; the name starts right after the
// category's NUL terminator.
917 void TraceSamplingThread::ExtractCategoryAndName(const char* combined,
918 const char** category,
920 *category = combined;
921 *name = &combined[strlen(combined) + 1];
924 void TraceSamplingThread::Stop() {
925 cancellation_flag_->Set();
// Takes ownership of |waitable_event| (reset into a scoped_ptr).
928 void TraceSamplingThread::InstallWaitableEventForSamplingTesting(
929 WaitableEvent* waitable_event) {
930 waitable_event_for_testing_.reset(waitable_event);
933 TraceBucketData::TraceBucketData(base::subtle::AtomicWord* bucket,
935 TraceSampleCallback callback)
941 TraceBucketData::~TraceBucketData() {
944 ////////////////////////////////////////////////////////////////////////////////
948 ////////////////////////////////////////////////////////////////////////////////
// Per-thread chunk of the shared trace buffer: lets a thread append events
// without taking TraceLog's lock on every event. Registered as a
// MessageLoop::DestructionObserver so the chunk is flushed back when the
// thread's message loop dies.
950 class TraceLog::ThreadLocalEventBuffer
951 : public MessageLoop::DestructionObserver {
953 ThreadLocalEventBuffer(TraceLog* trace_log);
954 virtual ~ThreadLocalEventBuffer();
956 TraceEvent* AddTraceEvent(NotificationHelper* notifier,
957 TraceEventHandle* handle);
959 void ReportOverhead(const TimeTicks& event_timestamp,
960 const TimeTicks& event_thread_timestamp,
961 NotificationHelper* notifier);
// Resolves a handle only if it refers to this buffer's current chunk.
963 TraceEvent* GetEventByHandle(TraceEventHandle handle) {
964 if (!chunk_ || handle.chunk_seq != chunk_->seq() ||
965 handle.chunk_index != chunk_index_)
968 return chunk_->GetEventAt(handle.event_index);
971 int generation() const { return generation_; }
974 // MessageLoop::DestructionObserver
975 virtual void WillDestroyCurrentMessageLoop() OVERRIDE;
977 void FlushWhileLocked();
979 void CheckThisIsCurrentBuffer() const {
980 DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
983 // Since TraceLog is a leaky singleton, trace_log_ will always be valid
984 // as long as the thread exists.
985 TraceLog* trace_log_;
986 scoped_ptr<TraceBufferChunk> chunk_;
992 DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer);
995 TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
996 : trace_log_(trace_log),
999 generation_(trace_log->generation()) {
1000 // ThreadLocalEventBuffer is created only if the thread has a message loop, so
1001 // the following message_loop won't be NULL.
1002 MessageLoop* message_loop = MessageLoop::current();
1003 message_loop->AddDestructionObserver(this);
// Registration below requires TraceLog's lock; flushing later needs to know
// which message loops still hold thread-local buffers.
1005 AutoLock lock(trace_log->lock_);
1006 trace_log->thread_message_loops_.insert(message_loop);
// Destructor: emits an average-overhead metadata event for this thread (when
// overhead was measured), then unregisters from the message loop and the
// TraceLog. The guard around the metadata emission is elided in this listing.
1009 TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
1010 CheckThisIsCurrentBuffer();
1011 MessageLoop::current()->RemoveDestructionObserver(this);
1013 // Zero event_count_ happens in either of the following cases:
1014 // - no event generated for the thread;
1015 // - the thread has no message loop;
1016 // - trace_event_overhead is disabled.
1018 NotificationHelper notifier(trace_log_);
1019 InitializeMetadataEvent(AddTraceEvent(&notifier, NULL),
1020 static_cast<int>(base::PlatformThread::CurrentId()),
1021 "overhead", "average_overhead",
1022 overhead_.InMillisecondsF() / event_count_);
1023 notifier.SendNotificationIfAny();
1027 AutoLock lock(trace_log_->lock_);
1029 trace_log_->thread_message_loops_.erase(MessageLoop::current());
1031 trace_log_->thread_local_event_buffer_.Set(NULL);
// Appends an event to this thread's chunk, flushing the full chunk back to
// the shared buffer (under TraceLog's lock) and fetching a new one as needed.
// Fills |*handle| so the caller can later update the event (e.g. duration).
1034 TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
1035 NotificationHelper* notifier,
1036 TraceEventHandle* handle) {
1037 CheckThisIsCurrentBuffer();
1039 if (chunk_ && chunk_->IsFull()) {
1040 AutoLock lock(trace_log_->lock_);
1045 AutoLock lock(trace_log_->lock_);
1046 chunk_ = trace_log_->logged_events_->GetChunk(&chunk_index_);
1047 trace_log_->CheckIfBufferIsFullWhileLocked(notifier);
1053 TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index);
1054 if (trace_event && handle)
1055 MakeHandle(chunk_->seq(), chunk_index_, event_index, handle);
// Measures how long the tracing machinery itself took for one event; emits a
// COMPLETE "overhead" event when it exceeds the report threshold, and always
// accumulates into overhead_ for the per-thread average (see destructor).
1060 void TraceLog::ThreadLocalEventBuffer::ReportOverhead(
1061 const TimeTicks& event_timestamp,
1062 const TimeTicks& event_thread_timestamp,
1063 NotificationHelper* notifier) {
1064 if (!g_category_group_enabled[g_category_trace_event_overhead])
1067 CheckThisIsCurrentBuffer();
1070 TimeTicks now = trace_log_->OffsetNow();
1071 TimeDelta overhead = now - event_timestamp;
1072 if (overhead.InMicroseconds() >= kOverheadReportThresholdInMicroseconds) {
1073 TraceEvent* trace_event = AddTraceEvent(notifier, NULL);
1075 trace_event->Initialize(
1076 static_cast<int>(PlatformThread::CurrentId()),
1077 event_timestamp, event_thread_timestamp,
1078 TRACE_EVENT_PHASE_COMPLETE,
1079 &g_category_group_enabled[g_category_trace_event_overhead],
1080 "overhead", 0, 0, NULL, NULL, NULL, NULL, 0);
1081 trace_event->UpdateDuration(now);
1084 overhead_ += overhead;
// The message loop is going away: the buffer deletes itself (body elided in
// this listing), which flushes via the destructor.
1087 void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
// Returns this thread's chunk to the shared buffer. Caller must hold
// TraceLog's lock (asserted below).
1091 void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() {
1095 trace_log_->lock_.AssertAcquired();
1096 if (trace_log_->CheckGeneration(generation_)) {
1097 // Return the chunk to the buffer only if the generation matches,
1098 trace_log_->logged_events_->ReturnChunk(chunk_index_, chunk_.Pass());
1100 // Otherwise this method may be called from the destructor, or TraceLog will
1101 // find the generation mismatch and delete this buffer soon.
// Collects notification bits while TraceLog's lock is held and delivers them
// (via a copy of the callback) after the lock is released, so the callback
// never runs under the lock.
1104 TraceLog::NotificationHelper::NotificationHelper(TraceLog* trace_log)
1105 : trace_log_(trace_log),
1109 TraceLog::NotificationHelper::~NotificationHelper() {
1112 void TraceLog::NotificationHelper::AddNotificationWhileLocked(
1114 trace_log_->lock_.AssertAcquired();
1115 if (trace_log_->notification_callback_.is_null())
// Snapshot the callback on the first notification so it can be run safely
// outside the lock.
1117 if (notification_ == 0)
1118 callback_copy_ = trace_log_->notification_callback_;
1119 notification_ |= notification;
// Called without the lock; the guard on |notification_| is elided here.
1122 void TraceLog::NotificationHelper::SendNotificationIfAny() {
1124 callback_copy_.Run(notification_);
// TraceLog is a leaky singleton (see DeleteTraceLogForTesting for the
// test-only teardown path).
1128 TraceLog* TraceLog::GetInstance() {
1129 return Singleton<TraceLog, LeakySingletonTraits<TraceLog> >::get();
1133 // Note, if you add more options here you also need to update:
1134 // content/browser/devtools/devtools_tracing_handler:TraceOptionsFromString
// Parses a comma-separated options string (kRecordUntilFull etc., declared
// above) into an Options bitmask; unknown tokens hit NOTREACHED().
1135 TraceLog::Options TraceLog::TraceOptionsFromString(const std::string& options) {
1136 std::vector<std::string> split;
1137 base::SplitString(options, ',', &split);
1139 for (std::vector<std::string>::iterator iter = split.begin();
1140 iter != split.end();
1142 if (*iter == kRecordUntilFull) {
1143 ret |= RECORD_UNTIL_FULL;
1144 } else if (*iter == kRecordContinuously) {
1145 ret |= RECORD_CONTINUOUSLY;
1146 } else if (*iter == kEnableSampling) {
1147 ret |= ENABLE_SAMPLING;
1148 } else if (*iter == kMonitorSampling) {
1149 ret |= MONITOR_SAMPLING;
1151 NOTREACHED(); // Unknown option provided.
1154 if (!(ret & RECORD_UNTIL_FULL) && !(ret & RECORD_CONTINUOUSLY))
1155 ret |= RECORD_UNTIL_FULL; // Default when no options are specified.
1157 return static_cast<Options>(ret);
// TraceLog constructor: zero-initializes bookkeeping, marks the per-category
// enabled bytes as benign races for TSAN, records the process id (except on
// NaCl), and honors --trace-to-console by enabling tracing immediately.
// NOTE(review): several initializer-list entries and the #else/#endif partner
// lines of the OS_NACL guard are missing from this excerpt.
1160 TraceLog::TraceLog()
1162 num_traces_recorded_(0),
1165 dispatching_to_observer_list_(false),
1166 process_sort_index_(0),
1167 process_id_hash_(0),
1170 trace_options_(RECORD_UNTIL_FULL),
1171 sampling_thread_handle_(0),
1172 category_filter_(CategoryFilter::kDefaultCategoryFilterString),
1173 thread_shared_chunk_index_(0),
1175 // Trace is enabled or disabled on one thread while other threads are
1176 // accessing the enabled flag. We don't care whether edge-case events are
1177 // traced or not, so we allow races on the enabled flag to keep the trace
1179 // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
1180 // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
1181 // sizeof(g_category_group_enabled),
1182 // "trace_event category enabled");
1183 for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) {
// Per-element annotation instead of the sized form (see TODO above).
1184 ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i],
1185 "trace_event category enabled");
1187 #if defined(OS_NACL) // NaCl shouldn't expose the process id.
1190 SetProcessID(static_cast<int>(GetCurrentProcId()));
1192 // NaCl also shouldn't access the command line.
1193 if (CommandLine::InitializedForCurrentProcess() &&
1194 CommandLine::ForCurrentProcess()->HasSwitch(switches::kTraceToConsole)) {
1195 std::string category_string =
1196 CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
1197 switches::kTraceToConsole);
// An empty switch value means "trace everything".
1199 if (category_string.empty())
1200 category_string = "*";
1202 SetEnabled(CategoryFilter(category_string), ECHO_TO_CONSOLE);
1206 logged_events_.reset(CreateTraceBuffer());
// Destructor; in practice only reached via DeleteForTesting since the
// singleton is leaky. Body not visible in this excerpt.
1209 TraceLog::~TraceLog() {
// Returns a pointer to the persistent enabled-flag byte for |category_group|,
// creating the category entry on first use. NOTE(review): the branch that
// handles a null |tracelog| (returning the "already shutdown" slot) is
// partially missing from this excerpt.
1212 const unsigned char* TraceLog::GetCategoryGroupEnabled(
1213 const char* category_group) {
1214 TraceLog* tracelog = GetInstance();
1216 DCHECK(!g_category_group_enabled[g_category_already_shutdown]);
1217 return &g_category_group_enabled[g_category_already_shutdown];
1219 return tracelog->GetCategoryGroupEnabledInternal(category_group);
1222 const char* TraceLog::GetCategoryGroupName(
1223 const unsigned char* category_group_enabled) {
1224 // Calculate the index of the category group by finding
1225 // category_group_enabled in g_category_group_enabled array.
1226 uintptr_t category_begin =
1227 reinterpret_cast<uintptr_t>(g_category_group_enabled);
1228 uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled);
1229 DCHECK(category_ptr >= category_begin &&
1230 category_ptr < reinterpret_cast<uintptr_t>(
1231 g_category_group_enabled + MAX_CATEGORY_GROUPS)) <<
1232 "out of bounds category pointer";
1233 uintptr_t category_index =
1234 (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
1235 return g_category_groups[category_index];
// Recomputes the enabled byte for one category group from the current
// category filter. NOTE(review): one line of the assignment's right-hand
// side (presumably an "enable_count_ &&" term) is missing from this excerpt.
1238 void TraceLog::UpdateCategoryGroupEnabledFlag(int category_index) {
1239 g_category_group_enabled[category_index] =
1241 category_filter_.IsCategoryGroupEnabled(
1242 g_category_groups[category_index]);
1245 void TraceLog::UpdateCategoryGroupEnabledFlags() {
1246 for (int i = 0; i < g_category_index; i++)
1247 UpdateCategoryGroupEnabledFlag(i);
// Looks up (or registers) |category_group| and returns its enabled-flag slot.
// Takes lock_ for the whole lookup/registration. NOTE(review): several lines
// (loop break, else branch selecting the "categories exhausted" slot, closing
// braces) are missing from this excerpt.
1250 const unsigned char* TraceLog::GetCategoryGroupEnabledInternal(
1251 const char* category_group) {
1252 DCHECK(!strchr(category_group, '"')) <<
1253 "Category groups may not contain double quote";
1254 AutoLock lock(lock_);
1256 unsigned char* category_group_enabled = NULL;
1257 // Search for pre-existing category group.
1258 for (int i = 0; i < g_category_index; i++) {
1259 if (strcmp(g_category_groups[i], category_group) == 0) {
1260 category_group_enabled = &g_category_group_enabled[i];
1265 if (!category_group_enabled) {
1266 // Create a new category group
1267 DCHECK(g_category_index < MAX_CATEGORY_GROUPS) <<
1268 "must increase MAX_CATEGORY_GROUPS";
1269 if (g_category_index < MAX_CATEGORY_GROUPS) {
1270 int new_index = g_category_index++;
1271 // Don't hold on to the category_group pointer, so that we can create
1272 // category groups with strings not known at compile time (this is
1273 // required by SetWatchEvent).
// strdup'd copy is deliberately leaked: category names must outlive TraceLog.
1274 const char* new_group = strdup(category_group);
1275 ANNOTATE_LEAKING_OBJECT_PTR(new_group);
1276 g_category_groups[new_index] = new_group;
1277 DCHECK(!g_category_group_enabled[new_index]);
1278 // Note that if both included and excluded patterns in the
1279 // CategoryFilter are empty, we exclude nothing,
1280 // thereby enabling this category group.
1281 UpdateCategoryGroupEnabledFlag(new_index);
1282 category_group_enabled = &g_category_group_enabled[new_index];
1284 category_group_enabled =
1285 &g_category_group_enabled[g_category_categories_exhausted];
1288 return category_group_enabled;
1291 void TraceLog::GetKnownCategoryGroups(
1292 std::vector<std::string>* category_groups) {
1293 AutoLock lock(lock_);
1294 category_groups->push_back(
1295 g_category_groups[g_category_trace_event_overhead]);
1296 for (int i = g_num_builtin_categories; i < g_category_index; i++)
1297 category_groups->push_back(g_category_groups[i]);
// Enables tracing with the given filter/options, refcounted via
// enable_count_: nested calls merge the filter but may not change options.
// Starts the sampling thread when sampling options are set, and notifies
// EnabledStateObservers outside the lock. NOTE(review): multiple lines
// (parameter line, early returns, brace lines, sample-bucket arguments) are
// missing from this excerpt.
1300 void TraceLog::SetEnabled(const CategoryFilter& category_filter,
1302 std::vector<EnabledStateObserver*> observer_list;
1304 AutoLock lock(lock_);
1306 // Can't enable tracing when Flush() is in progress.
1307 DCHECK(!flush_message_loop_proxy_.get());
1309 Options old_options = trace_options();
// Nested enable: options must match; filters are merged, not replaced.
1311 if (enable_count_++ > 0) {
1312 if (options != old_options) {
1313 DLOG(ERROR) << "Attemting to re-enable tracing with a different "
1314 << "set of options.";
1317 category_filter_.Merge(category_filter);
1318 UpdateCategoryGroupEnabledFlags();
// First enable with different options: rebuild the buffer to match.
1322 if (options != old_options) {
1323 subtle::NoBarrier_Store(&trace_options_, options);
1324 logged_events_.reset(CreateTraceBuffer());
1326 subtle::NoBarrier_Store(&buffer_is_full_, 0);
1329 if (dispatching_to_observer_list_) {
1331 "Cannot manipulate TraceLog::Enabled state from an observer.";
1335 num_traces_recorded_++;
1337 category_filter_ = CategoryFilter(category_filter);
1338 UpdateCategoryGroupEnabledFlags();
1340 if ((options & ENABLE_SAMPLING) || (options & MONITOR_SAMPLING)) {
1341 sampling_thread_.reset(new TraceSamplingThread);
1342 sampling_thread_->RegisterSampleBucket(
1345 Bind(&TraceSamplingThread::DefaultSamplingCallback));
1346 sampling_thread_->RegisterSampleBucket(
1349 Bind(&TraceSamplingThread::DefaultSamplingCallback));
1350 sampling_thread_->RegisterSampleBucket(
1353 Bind(&TraceSamplingThread::DefaultSamplingCallback));
1354 if (!PlatformThread::Create(
1355 0, sampling_thread_.get(), &sampling_thread_handle_)) {
1356 DCHECK(false) << "failed to create thread";
1360 dispatching_to_observer_list_ = true;
1361 observer_list = enabled_state_observer_list_;
1363 // Notify observers outside the lock in case they trigger trace events.
1364 for (size_t i = 0; i < observer_list.size(); ++i)
1365 observer_list[i]->OnTraceLogEnabled();
1368 AutoLock lock(lock_);
1369 dispatching_to_observer_list_ = false;
// Returns the active category filter; only meaningful while tracing is
// enabled (DCHECKed). The returned reference is owned by TraceLog.
1373 const CategoryFilter& TraceLog::GetCurrentCategoryFilter() {
1374 AutoLock lock(lock_);
1375 DCHECK(enable_count_ > 0);
1376 return category_filter_;
// Decrements the enable refcount; on the final disable, stops the sampling
// thread, clears the filter and watch state, appends metadata events, and
// notifies EnabledStateObservers outside the lock. NOTE(review): early
// returns, a lock-release around the thread join, and brace lines are
// missing from this excerpt.
1379 void TraceLog::SetDisabled() {
1380 std::vector<EnabledStateObserver*> observer_list;
1382 AutoLock lock(lock_);
1383 DCHECK(enable_count_ > 0);
// Nested disable: nothing to tear down until the count reaches zero.
1385 if (--enable_count_ != 0)
1388 if (dispatching_to_observer_list_) {
1390 << "Cannot manipulate TraceLog::Enabled state from an observer.";
1394 if (sampling_thread_.get()) {
1395 // Stop the sampling thread.
1396 sampling_thread_->Stop();
1398 PlatformThread::Join(sampling_thread_handle_);
1400 sampling_thread_handle_ = PlatformThreadHandle();
1401 sampling_thread_.reset();
1404 category_filter_.Clear();
1405 subtle::NoBarrier_Store(&watch_category_, 0);
1406 watch_event_name_ = "";
1407 UpdateCategoryGroupEnabledFlags();
1408 AddMetadataEventsWhileLocked();
1410 dispatching_to_observer_list_ = true;
1411 observer_list = enabled_state_observer_list_;
1414 // Dispatch to observers outside the lock in case the observer triggers a
1416 for (size_t i = 0; i < observer_list.size(); ++i)
1417 observer_list[i]->OnTraceLogDisabled();
1420 AutoLock lock(lock_);
1421 dispatching_to_observer_list_ = false;
1425 int TraceLog::GetNumTracesRecorded() {
1426 AutoLock lock(lock_);
1427 if (enable_count_ == 0)
1429 return num_traces_recorded_;
// Registers |listener| for OnTraceLogEnabled/OnTraceLogDisabled callbacks.
// Does not take ownership; pair with RemoveEnabledStateObserver.
1432 void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) {
1433 enabled_state_observer_list_.push_back(listener);
// Unregisters a previously added observer; a no-op if it was never added.
// NOTE(review): the final std::find argument line (|listener|) is missing
// from this excerpt.
1436 void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver* listener) {
1437 std::vector<EnabledStateObserver*>::iterator it =
1438 std::find(enabled_state_observer_list_.begin(),
1439 enabled_state_observer_list_.end(),
1441 if (it != enabled_state_observer_list_.end())
1442 enabled_state_observer_list_.erase(it);
// Returns true if |listener| is currently registered. NOTE(review): the
// final std::find argument line (|listener|) is missing from this excerpt.
1445 bool TraceLog::HasEnabledStateObserver(EnabledStateObserver* listener) const {
1446 std::vector<EnabledStateObserver*>::const_iterator it =
1447 std::find(enabled_state_observer_list_.begin(),
1448 enabled_state_observer_list_.end(),
1450 return it != enabled_state_observer_list_.end();
1453 float TraceLog::GetBufferPercentFull() const {
1454 return static_cast<float>(static_cast<double>(logged_events_->Size()) /
1455 logged_events_->Capacity());
// Installs the callback invoked (outside the lock, via NotificationHelper)
// when notifications such as TRACE_BUFFER_FULL fire.
1458 void TraceLog::SetNotificationCallback(
1459 const TraceLog::NotificationCallback& cb) {
1460 AutoLock lock(lock_);
1461 notification_callback_ = cb;
1464 TraceBuffer* TraceLog::CreateTraceBuffer() {
1465 Options options = trace_options();
1466 if (options & RECORD_CONTINUOUSLY)
1467 return new TraceBufferRingBuffer(kTraceEventRingBufferChunks);
1468 else if (options & MONITOR_SAMPLING)
1469 return new TraceBufferRingBuffer(kMonitorTraceEventBufferChunks);
1470 else if (options & ECHO_TO_CONSOLE)
1471 return new TraceBufferRingBuffer(kEchoToConsoleTraceEventBufferChunks);
1472 return new TraceBufferVector();
// Allocates a TraceEvent slot from the shared (non-thread-local) chunk,
// fetching a fresh chunk when needed, and fills in |handle| for later lookup.
// Caller must hold lock_. NOTE(review): the NULL-return path and the
// event_index declaration are missing from this excerpt.
1475 TraceEvent* TraceLog::AddEventToThreadSharedChunkWhileLocked(
1476 NotificationHelper* notifier, TraceEventHandle* handle) {
1477 lock_.AssertAcquired();
1479 if (thread_shared_chunk_ && thread_shared_chunk_->IsFull()) {
1480 logged_events_->ReturnChunk(thread_shared_chunk_index_,
1481 thread_shared_chunk_.Pass());
1484 if (!thread_shared_chunk_) {
1485 thread_shared_chunk_ = logged_events_->GetChunk(
1486 &thread_shared_chunk_index_);
// Taking a chunk may have filled the buffer; latch the full notification.
1488 CheckIfBufferIsFullWhileLocked(notifier);
1490 if (!thread_shared_chunk_)
1494 TraceEvent* trace_event = thread_shared_chunk_->AddTraceEvent(&event_index);
1495 if (trace_event && handle) {
1496 MakeHandle(thread_shared_chunk_->seq(), thread_shared_chunk_index_,
1497 event_index, handle);
1502 void TraceLog::CheckIfBufferIsFullWhileLocked(NotificationHelper* notifier) {
1503 lock_.AssertAcquired();
1504 if (!subtle::NoBarrier_Load(&buffer_is_full_) && logged_events_->IsFull()) {
1505 subtle::NoBarrier_Store(&buffer_is_full_,
1506 static_cast<subtle::AtomicWord>(1));
1507 notifier->AddNotificationWhileLocked(TRACE_BUFFER_FULL);
// Publishes the per-event callback; stored as an AtomicWord so the hot
// AddTraceEvent path can read it without taking lock_.
1511 void TraceLog::SetEventCallback(EventCallback cb) {
1512 subtle::NoBarrier_Store(&event_callback_,
1513 reinterpret_cast<subtle::AtomicWord>(cb));
1516 // Flush() works as the following:
1517 // 1. Flush() is called in threadA whose message loop is saved in
1518 // flush_message_loop_proxy_;
1519 // 2. If thread_message_loops_ is not empty, threadA posts task to each message
1520 // loop to flush the thread local buffers; otherwise finish the flush;
1521 // 3. FlushCurrentThread() deletes the thread local event buffer:
1522 // - The last batch of events of the thread are flushed into the main buffer;
1523 // - The message loop will be removed from thread_message_loops_;
1524 // If this is the last message loop, finish the flush;
1525 // 4. If any thread hasn't finish its flush in time, finish the flush.
// NOTE(review): several lines of this function (the IsEnabled() guard, the
// PostTask call on each thread's loop, early returns and braces) are missing
// from this excerpt.
1526 void TraceLog::Flush(const TraceLog::OutputCallback& cb) {
1528 // Can't flush when tracing is enabled because otherwise PostTask would
1529 // - it generates more trace events;
1530 // - it deschedules the calling thread on some platforms causing inaccurate
1531 // timing of the trace events.
// Flushing while enabled is refused: deliver an empty result and warn.
1532 scoped_refptr<RefCountedString> empty_result = new RefCountedString;
1534 cb.Run(empty_result, false);
1535 LOG(WARNING) << "Ignored TraceLog::Flush called when tracing is enabled";
// Capture the generation so late per-thread flushes from an older session
// can be detected and ignored.
1539 int generation = this->generation();
1541 AutoLock lock(lock_);
1542 DCHECK(!flush_message_loop_proxy_.get());
1543 flush_message_loop_proxy_ = MessageLoopProxy::current();
1544 DCHECK(!thread_message_loops_.size() || flush_message_loop_proxy_.get());
1545 flush_output_callback_ = cb;
1547 if (thread_shared_chunk_) {
1548 logged_events_->ReturnChunk(thread_shared_chunk_index_,
1549 thread_shared_chunk_.Pass());
1552 if (thread_message_loops_.size()) {
1553 for (hash_set<MessageLoop*>::const_iterator it =
1554 thread_message_loops_.begin();
1555 it != thread_message_loops_.end(); ++it) {
1558 Bind(&TraceLog::FlushCurrentThread, Unretained(this), generation));
// Safety net: finish the flush even if some thread never runs its task.
1560 flush_message_loop_proxy_->PostDelayedTask(
1562 Bind(&TraceLog::OnFlushTimeout, Unretained(this), generation),
1563 TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs));
1568 FinishFlush(generation);
// Serializes |logged_events| to JSON and streams it to the callback in
// batches of kTraceEventBatchChunks chunks; the callback always runs at
// least once, with has_more_events=false on the final batch. NOTE(review):
// the do{ opener, the NULL-chunk break, and comma bookkeeping lines are
// missing from this excerpt.
1571 void TraceLog::ConvertTraceEventsToTraceFormat(
1572 scoped_ptr<TraceBuffer> logged_events,
1573 const TraceLog::OutputCallback& flush_output_callback) {
1575 if (flush_output_callback.is_null())
1578 // The callback need to be called at least once even if there is no events
1579 // to let the caller know the completion of flush.
1580 bool has_more_events = true;
1582 scoped_refptr<RefCountedString> json_events_str_ptr =
1583 new RefCountedString();
1585 for (size_t i = 0; i < kTraceEventBatchChunks; ++i) {
1586 const TraceBufferChunk* chunk = logged_events->NextChunk();
1588 has_more_events = false;
1591 for (size_t j = 0; j < chunk->size(); ++j) {
1593 json_events_str_ptr->data().append(",");
1594 chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data()));
1598 flush_output_callback.Run(json_events_str_ptr, has_more_events);
1599 } while (has_more_events);
// Completes a flush: swaps out the filled buffer for a fresh one under the
// lock, then serializes the old buffer to the saved output callback outside
// the lock. A stale |generation| aborts (a newer flush superseded this one).
1602 void TraceLog::FinishFlush(int generation) {
1603 scoped_ptr<TraceBuffer> previous_logged_events;
1604 OutputCallback flush_output_callback;
1606 if (!CheckGeneration(generation))
1610 AutoLock lock(lock_);
1612 previous_logged_events.swap(logged_events_);
1613 logged_events_.reset(CreateTraceBuffer());
1615 subtle::NoBarrier_Store(&buffer_is_full_, 0);
1616 thread_message_loops_.clear();
1618 flush_message_loop_proxy_ = NULL;
// Move the callback out so it runs after the lock is released.
1619 flush_output_callback = flush_output_callback_;
1620 flush_output_callback_.Reset();
1623 ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
1624 flush_output_callback);
1627 // Run in each thread holding a local event buffer.
// Posted to each traced thread by Flush(): deletes the thread-local buffer
// (which flushes its events to the main buffer) and, if this was the last
// pending thread, posts FinishFlush back to the flushing thread.
// NOTE(review): early returns and brace lines are missing from this excerpt.
1628 void TraceLog::FlushCurrentThread(int generation) {
1630 AutoLock lock(lock_);
1631 if (!CheckGeneration(generation) || !flush_message_loop_proxy_) {
1632 // This is late. The corresponding flush has finished.
1637 // This will flush the thread local buffer.
1638 delete thread_local_event_buffer_.Get();
1640 AutoLock lock(lock_);
1641 if (!CheckGeneration(generation) || !flush_message_loop_proxy_ ||
1642 thread_message_loops_.size())
1645 flush_message_loop_proxy_->PostTask(
1647 Bind(&TraceLog::FinishFlush, Unretained(this), generation));
// Delayed-task fallback for Flush(): if some threads never ran
// FlushCurrentThread within kThreadFlushTimeoutMs, log which ones and finish
// the flush anyway (their events are lost). NOTE(review): the LOG(WARNING)
// opener and brace lines are missing from this excerpt.
1650 void TraceLog::OnFlushTimeout(int generation) {
1652 AutoLock lock(lock_);
1653 if (!CheckGeneration(generation) || !flush_message_loop_proxy_) {
1654 // Flush has finished before timeout.
1659 "The following threads haven't finished flush in time. "
1660 "If this happens stably for some thread, please call "
1661 "TraceLog::GetInstance()->SetCurrentThreadBlocksMessageLoop() from "
1662 "the thread to avoid its trace events from being lost.";
1663 for (hash_set<MessageLoop*>::const_iterator it =
1664 thread_message_loops_.begin();
1665 it != thread_message_loops_.end(); ++it) {
1666 LOG(WARNING) << "Thread: " << (*it)->thread_name();
1669 FinishFlush(generation);
// Snapshot-style flush used by monitoring mode: clones the current buffer
// for iteration instead of swapping it out, so tracing continues
// uninterrupted. Only meaningful while the sampling thread exists.
// NOTE(review): the early return after the !sampling_thread_ check is
// missing from this excerpt.
1672 void TraceLog::FlushButLeaveBufferIntact(
1673 const TraceLog::OutputCallback& flush_output_callback) {
1674 if (!sampling_thread_)
1677 scoped_ptr<TraceBuffer> previous_logged_events;
1679 AutoLock lock(lock_);
1680 AddMetadataEventsWhileLocked();
1681 if (thread_shared_chunk_) {
1682 // Return the chunk to the main buffer to flush the sampling data.
1683 logged_events_->ReturnChunk(thread_shared_chunk_index_,
1684 thread_shared_chunk_.Pass());
1686 previous_logged_events = logged_events_->CloneForIteration().Pass();
1689 ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
1690 flush_output_callback);
// Convenience overload: stamps the current thread id and the system trace
// clock, then forwards to AddTraceEventWithThreadIdAndTimestamp.
// NOTE(review): several parameter lines (phase, name, num_args) are missing
// from this excerpt.
1693 TraceEventHandle TraceLog::AddTraceEvent(
1695 const unsigned char* category_group_enabled,
1697 unsigned long long id,
1699 const char** arg_names,
1700 const unsigned char* arg_types,
1701 const unsigned long long* arg_values,
1702 const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
1703 unsigned char flags) {
1704 int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
1705 base::TimeTicks now = base::TimeTicks::NowFromSystemTraceTime();
1706 return AddTraceEventWithThreadIdAndTimestamp(phase, category_group_enabled,
1707 name, id, thread_id, now,
1708 num_args, arg_names,
1709 arg_types, arg_values,
1710 convertable_values, flags);
// Core event-recording path. Picks a thread-local buffer when the thread has
// a usable message loop (otherwise falls back to the lock-protected shared
// chunk), maintains the thread-name map, and handles the optional side
// channels: console echo, watch-event notification, and the registered
// EventCallback. Locking is lazy via OptionalAutoLock — acquired only for
// the slow paths. NOTE(review): many lines (parameter lines, early return,
// brace lines, AddTraceEvent argument tails) are missing from this excerpt.
1713 TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
1715 const unsigned char* category_group_enabled,
1717 unsigned long long id,
1719 const TimeTicks& timestamp,
1721 const char** arg_names,
1722 const unsigned char* arg_types,
1723 const unsigned long long* arg_values,
1724 const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
1725 unsigned char flags) {
1726 TraceEventHandle handle = { 0, 0, 0 };
1727 if (!*category_group_enabled)
// Optionally XOR-mangle ids so they don't collide across processes.
1732 if (flags & TRACE_EVENT_FLAG_MANGLE_ID)
1733 id ^= process_id_hash_;
1735 TimeTicks now = OffsetTimestamp(timestamp);
1736 TimeTicks thread_now = ThreadNow();
1738 NotificationHelper notifier(this);
1740 ThreadLocalEventBuffer* thread_local_event_buffer = NULL;
1741 // A ThreadLocalEventBuffer needs the message loop
1742 // - to know when the thread exits;
1743 // - to handle the final flush.
1744 // For a thread without a message loop or the message loop may be blocked, the
1745 // trace events will be added into the main buffer directly.
1746 if (!thread_blocks_message_loop_.Get() && MessageLoop::current()) {
1747 thread_local_event_buffer = thread_local_event_buffer_.Get();
// A buffer from an older trace session is discarded and recreated.
1748 if (thread_local_event_buffer &&
1749 !CheckGeneration(thread_local_event_buffer->generation())) {
1750 delete thread_local_event_buffer;
1751 thread_local_event_buffer = NULL;
1753 if (!thread_local_event_buffer) {
1754 thread_local_event_buffer = new ThreadLocalEventBuffer(this);
1755 thread_local_event_buffer_.Set(thread_local_event_buffer);
1759 OptionalAutoLock lock(lock_);
1760 // Check and update the current thread name only if the event is for the
1761 // current thread to avoid locks in most cases.
1762 if (thread_id == static_cast<int>(PlatformThread::CurrentId())) {
1763 const char* new_name = ThreadIdNameManager::GetInstance()->
1765 // Check if the thread name has been set or changed since the previous
1766 // call (if any), but don't bother if the new name is empty. Note this will
1767 // not detect a thread name change within the same char* buffer address: we
1768 // favor common case performance over corner case correctness.
1769 if (new_name != g_current_thread_name.Get().Get() &&
1770 new_name && *new_name) {
1771 g_current_thread_name.Get().Set(new_name);
1773 lock.EnsureAcquired();
1774 hash_map<int, std::string>::iterator existing_name =
1775 thread_names_.find(thread_id);
1776 if (existing_name == thread_names_.end()) {
1777 // This is a new thread id, and a new name.
1778 thread_names_[thread_id] = new_name;
1780 // This is a thread id that we've seen before, but potentially with a
// Keep every distinct name seen for this id, comma-separated.
1782 std::vector<StringPiece> existing_names;
1783 Tokenize(existing_name->second, ",", &existing_names);
1784 bool found = std::find(existing_names.begin(),
1785 existing_names.end(),
1786 new_name) != existing_names.end();
1788 existing_name->second.push_back(',');
1789 existing_name->second.append(new_name);
1795 TraceEvent* trace_event = NULL;
1796 if (!subtle::NoBarrier_Load(&buffer_is_full_)) {
1797 if (thread_local_event_buffer) {
// Thread-local path must not hold lock_ (the buffer locks internally).
1798 lock.EnsureReleased();
1799 trace_event = thread_local_event_buffer->AddTraceEvent(&notifier,
1802 lock.EnsureAcquired();
1803 trace_event = AddEventToThreadSharedChunkWhileLocked(&notifier, &handle);
1807 trace_event->Initialize(thread_id, now, thread_now, phase,
1808 category_group_enabled, name, id,
1809 num_args, arg_names, arg_types, arg_values,
1810 convertable_values, flags);
1812 #if defined(OS_ANDROID)
1813 trace_event->SendToATrace();
1818 if (trace_options() & ECHO_TO_CONSOLE) {
1819 lock.EnsureAcquired();
1820 OutputEventToConsoleWhileLocked(
1821 phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
1822 timestamp, trace_event);
// Watch-event check: the watched category pointer is published atomically.
1825 if (reinterpret_cast<const unsigned char*>(subtle::NoBarrier_Load(
1826 &watch_category_)) == category_group_enabled) {
1827 lock.EnsureAcquired();
1828 if (watch_event_name_ == name)
1829 notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION);
1832 lock.EnsureReleased();
1833 EventCallback event_callback = reinterpret_cast<EventCallback>(
1834 subtle::NoBarrier_Load(&event_callback_));
1835 if (event_callback) {
1836 // TODO(wangxianzhu): Should send TRACE_EVENT_PHASE_COMPLETE directly to
1837 // clients if it is beneficial and feasible.
1839 phase == TRACE_EVENT_PHASE_COMPLETE ?
1840 TRACE_EVENT_PHASE_BEGIN : phase,
1841 category_group_enabled, name, id,
1842 num_args, arg_names, arg_types, arg_values,
1846 if (thread_local_event_buffer)
1847 thread_local_event_buffer->ReportOverhead(now, thread_now, &notifier);
// Fire any queued notifications now that no locks are held.
1849 notifier.SendNotificationIfAny();
1854 // May be called when a COMPLETE event ends and the unfinished event has been
1855 // recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL).
// Pretty-prints one event to LOG(ERROR) with per-thread ANSI colors and
// indentation reflecting the per-thread begin/end nesting depth.
// NOTE(review): the duration/depth declarations and the indent-emitting line
// are missing from this excerpt.
1856 void TraceLog::OutputEventToConsoleWhileLocked(unsigned char phase,
1857 const TimeTicks& timestamp,
1858 TraceEvent* trace_event) {
1859 // The caller should translate TRACE_EVENT_PHASE_COMPLETE to
1860 // TRACE_EVENT_PHASE_BEGIN or TRACE_EVENT_PHASE_END.
1861 DCHECK(phase != TRACE_EVENT_PHASE_COMPLETE);
1862 lock_.AssertAcquired();
1865 int thread_id = trace_event ?
1866 trace_event->thread_id() : PlatformThread::CurrentId();
// END pops the matching BEGIN timestamp to compute the duration.
1867 if (phase == TRACE_EVENT_PHASE_END) {
1868 duration = timestamp - thread_event_start_times_[thread_id].top();
1869 thread_event_start_times_[thread_id].pop();
// Assign each thread name one of six ANSI colors, round-robin.
1872 std::string thread_name = thread_names_[thread_id];
1873 if (thread_colors_.find(thread_name) == thread_colors_.end())
1874 thread_colors_[thread_name] = (thread_colors_.size() % 6) + 1;
1876 std::ostringstream log;
1877 log << base::StringPrintf("%s: \x1b[0;3%dm",
1878 thread_name.c_str(),
1879 thread_colors_[thread_name]);
1882 if (thread_event_start_times_.find(thread_id) !=
1883 thread_event_start_times_.end())
1884 depth = thread_event_start_times_[thread_id].size();
1886 for (size_t i = 0; i < depth; ++i)
1890 trace_event->AppendPrettyPrinted(&log);
1891 if (phase == TRACE_EVENT_PHASE_END)
1892 log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());
1894 LOG(ERROR) << log.str() << "\x1b[0;m";
// BEGIN pushes its timestamp for the matching END above.
1896 if (phase == TRACE_EVENT_PHASE_BEGIN)
1897 thread_event_start_times_[thread_id].push(timestamp);
// ETW bridge (const char* extra): forwards to the Windows ETW provider;
// otherwise records a regular "ETW Trace Event". NOTE(review): the name/id
// parameter lines and the #if defined(OS_WIN)/#else/#endif lines are missing
// from this excerpt.
1900 void TraceLog::AddTraceEventEtw(char phase,
1903 const char* extra) {
1905 TraceEventETWProvider::Trace(name, phase, id, extra);
1907 INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
1908 TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
// ETW bridge (std::string extra) — same behavior as the const char* overload
// above. NOTE(review): the name/id parameter lines, the opening brace, and
// the #if/#else/#endif lines are missing from this excerpt.
1911 void TraceLog::AddTraceEventEtw(char phase,
1914 const std::string& extra)
1917 TraceEventETWProvider::Trace(name, phase, id, extra);
1919 INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
1920 TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
// Closes a COMPLETE event identified by |handle|: stamps its duration, then
// replays the END through the optional console echo and EventCallback side
// channels. NOTE(review): the guard around the trace_event dereference and
// some brace/#endif lines are missing from this excerpt.
1923 void TraceLog::UpdateTraceEventDuration(TraceEventHandle handle) {
1924 OptionalAutoLock lock(lock_);
1926 TimeTicks now = OffsetNow();
1927 TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock);
1929 DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE);
1930 trace_event->UpdateDuration(now);
1931 #if defined(OS_ANDROID)
1932 trace_event->SendToATrace();
1936 if (trace_options() & ECHO_TO_CONSOLE) {
1937 lock.EnsureAcquired();
1938 OutputEventToConsoleWhileLocked(TRACE_EVENT_PHASE_END, now, trace_event);
1941 EventCallback event_callback = reinterpret_cast<EventCallback>(
1942 subtle::NoBarrier_Load(&event_callback_));
1943 if (event_callback && trace_event) {
1944 // The copy is needed when trace_event is from the main buffer in which case
1945 // the lock has been locked.
// Copy first so the callback can run without holding lock_.
1946 TraceEvent event_copy;
1947 event_copy.CopyFrom(*trace_event);
1948 lock.EnsureReleased();
1949 // TODO(wangxianzhu): Should send TRACE_EVENT_PHASE_COMPLETE directly to
1950 // clients if it is beneficial and feasible.
1951 event_callback(now, TRACE_EVENT_PHASE_END,
1952 event_copy.category_group_enabled(),
1953 event_copy.name(), event_copy.id(),
1954 0, NULL, NULL, NULL, event_copy.flags());
1958 void TraceLog::SetWatchEvent(const std::string& category_name,
1959 const std::string& event_name) {
1960 const unsigned char* category = GetCategoryGroupEnabled(
1961 category_name.c_str());
1962 AutoLock lock(lock_);
1963 subtle::NoBarrier_Store(&watch_category_,
1964 reinterpret_cast<subtle::AtomicWord>(category));
1965 watch_event_name_ = event_name;
1968 void TraceLog::CancelWatchEvent() {
1969 AutoLock lock(lock_);
1970 subtle::NoBarrier_Store(&watch_category_, 0);
1971 watch_event_name_ = "";
// Emits metadata events (process name/labels/sort index, per-thread names
// and sort indices) into the shared chunk so they appear in the trace
// output. Caller must hold lock_. NOTE(review): several argument lines,
// loop increments, continues and braces are missing from this excerpt.
1974 void TraceLog::AddMetadataEventsWhileLocked() {
1975 lock_.AssertAcquired();
1977 int current_thread_id = static_cast<int>(base::PlatformThread::CurrentId());
1978 if (process_sort_index_ != 0) {
1979 InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, NULL),
1981 "process_sort_index", "sort_index",
1982 process_sort_index_);
1985 if (process_name_.size()) {
1986 InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, NULL),
1988 "process_name", "name",
1992 if (process_labels_.size() > 0) {
// All labels are joined into a single comma-separated metadata value.
1993 std::vector<std::string> labels;
1994 for(base::hash_map<int, std::string>::iterator it = process_labels_.begin();
1995 it != process_labels_.end();
1997 labels.push_back(it->second);
1999 InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, NULL),
2001 "process_labels", "labels",
2002 JoinString(labels, ','));
2005 // Thread sort indices.
2006 for(hash_map<int, int>::iterator it = thread_sort_indices_.begin();
2007 it != thread_sort_indices_.end();
// Zero is the default sort index; no metadata event needed.
2009 if (it->second == 0)
2011 InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, NULL),
2013 "thread_sort_index", "sort_index",
2018 for(hash_map<int, std::string>::iterator it = thread_names_.begin();
2019 it != thread_names_.end();
2021 if (it->second.empty())
2023 InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, NULL),
2025 "thread_name", "name",
// Test hook: hands a WaitableEvent to the sampling thread so tests can
// synchronize with sampling. NOTE(review): the early return after the
// !sampling_thread_ check is missing from this excerpt.
2030 void TraceLog::InstallWaitableEventForSamplingTesting(
2031 WaitableEvent* waitable_event) {
2032 if (!sampling_thread_)
2034 sampling_thread_->InstallWaitableEventForSamplingTesting(waitable_event);
// Test-only teardown of the otherwise-leaky singleton (see
// DeleteTraceLogForTesting at the top of the file).
2037 void TraceLog::DeleteForTesting() {
2038 DeleteTraceLogForTesting::Delete();
// Public lookup; passes no OptionalAutoLock, so only the lock-free
// (thread-local) paths of the internal lookup are available.
2041 TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
2042 return GetEventByHandleInternal(handle, NULL);
// Resolves |handle| to its TraceEvent: first the thread-local buffer
// (lock-free), then the shared chunk / main buffer under |lock|. A zero
// chunk_seq is an invalid handle. NOTE(review): the NULL returns and the
// found-in-thread-local early return are missing from this excerpt.
2045 TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle,
2046 OptionalAutoLock* lock) {
2047 if (!handle.chunk_seq)
2050 if (thread_local_event_buffer_.Get()) {
2051 TraceEvent* trace_event =
2052 thread_local_event_buffer_.Get()->GetEventByHandle(handle);
2057 // The event has been out-of-control of the thread local buffer.
2058 // Try to get the event from the main buffer with a lock.
2060 lock->EnsureAcquired();
2062 if (thread_shared_chunk_ &&
2063 handle.chunk_index == thread_shared_chunk_index_) {
// seq mismatch means the chunk was recycled since the handle was made.
2064 return handle.chunk_seq == thread_shared_chunk_->seq() ?
2065 thread_shared_chunk_->GetEventAt(handle.event_index) : NULL;
2068 return logged_events_->GetEventByHandle(handle);
2071 void TraceLog::SetProcessID(int process_id) {
2072 process_id_ = process_id;
2073 // Create a FNV hash from the process ID for XORing.
2074 // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
2075 unsigned long long offset_basis = 14695981039346656037ull;
2076 unsigned long long fnv_prime = 1099511628211ull;
2077 unsigned long long pid = static_cast<unsigned long long>(process_id_);
2078 process_id_hash_ = (offset_basis ^ pid) * fnv_prime;
// Sets the sort key emitted as "process_sort_index" metadata (see
// AddMetadataEventsWhileLocked); viewers use it to order processes.
2081 void TraceLog::SetProcessSortIndex(int sort_index) {
2082 AutoLock lock(lock_);
2083 process_sort_index_ = sort_index;
// Sets the name emitted as "process_name" metadata in the trace output.
2086 void TraceLog::SetProcessName(const std::string& process_name) {
2087 AutoLock lock(lock_);
2088 process_name_ = process_name;
2091 void TraceLog::UpdateProcessLabel(
2092 int label_id, const std::string& current_label) {
2093 if(!current_label.length())
2094 return RemoveProcessLabel(label_id);
2096 AutoLock lock(lock_);
2097 process_labels_[label_id] = current_label;
// Erases the process label stored under |label_id|, if any. NOTE(review):
// the find() argument line (|label_id|) is missing from this excerpt.
2100 void TraceLog::RemoveProcessLabel(int label_id) {
2101 AutoLock lock(lock_);
2102 base::hash_map<int, std::string>::iterator it = process_labels_.find(
2104 if (it == process_labels_.end())
2107 process_labels_.erase(it);
// Sets the per-thread sort key emitted as "thread_sort_index" metadata.
2110 void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) {
2111 AutoLock lock(lock_);
2112 thread_sort_indices_[static_cast<int>(thread_id)] = sort_index;
// Sets the offset applied to event timestamps (see OffsetTimestamp /
// OffsetNow usage in the add-event paths). Not lock-protected here.
2115 void TraceLog::SetTimeOffset(TimeDelta offset) {
2116 time_offset_ = offset;
// Test-only: number of registered EnabledStateObservers.
2119 size_t TraceLog::GetObserverCountForTest() const {
2120 return enabled_state_observer_list_.size();
// Opts the calling thread out of thread-local buffering (its events go to
// the shared buffer instead), for threads whose message loop may block and
// so could not service the flush task. Flushes any existing local buffer.
2123 void TraceLog::SetCurrentThreadBlocksMessageLoop() {
2124 thread_blocks_message_loop_.Set(true);
2125 if (thread_local_event_buffer_.Get()) {
2126 // This will flush the thread local buffer.
2127 delete thread_local_event_buffer_.Get();
2131 bool CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
2132 const std::string& str) {
2133 return str.empty() ||
2135 str.at(str.length() - 1) == ' ';
// Returns whether any comma-separated token of |category_group| matches the
// |category| pattern (MatchPattern supports '*'/'?' wildcards).
// NOTE(review): the "return true;" inside the loop, the final
// "return false;", and a DCHECK on |category| are missing from this excerpt.
2138 bool CategoryFilter::DoesCategoryGroupContainCategory(
2139 const char* category_group,
2140 const char* category) const {
2142 CStringTokenizer category_group_tokens(category_group,
2143 category_group + strlen(category_group), ",");
2144 while (category_group_tokens.GetNext()) {
2145 std::string category_group_token = category_group_tokens.token();
2146 // Don't allow empty tokens, nor tokens with leading or trailing space.
2147 DCHECK(!CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
2148 category_group_token))
2149 << "Disallowed category string";
2150 if (MatchPattern(category_group_token.c_str(), category))
2156 CategoryFilter::CategoryFilter(const std::string& filter_string) {
2157 if (!filter_string.empty())
2158 Initialize(filter_string);
2160 Initialize(CategoryFilter::kDefaultCategoryFilterString);
// Copy constructor: member-wise copy of the three pattern lists.
2163 CategoryFilter::CategoryFilter(const CategoryFilter& cf)
2164 : included_(cf.included_),
2165 disabled_(cf.disabled_),
2166 excluded_(cf.excluded_) {
// Destructor; the string lists clean themselves up.
2169 CategoryFilter::~CategoryFilter() {
2172 CategoryFilter& CategoryFilter::operator=(const CategoryFilter& rhs) {
2176 included_ = rhs.included_;
2177 disabled_ = rhs.disabled_;
2178 excluded_ = rhs.excluded_;
// Parses |filter_string| into the three pattern lists:
//   "-foo"                    -> excluded_ (leading '-' stripped)
//   disabled-by-default-foo   -> disabled_
//   anything else             -> included_
// NOTE(review): the 'continue;' after the empty check and the 'else'
// branch wrapping the included_ push are not visible in this extraction.
2182 void CategoryFilter::Initialize(const std::string& filter_string) {
2183 // Tokenize list of categories, delimited by ','.
2184 StringTokenizer tokens(filter_string, ",");
2185 // Add each token to the appropriate list (included_,excluded_).
2186 while (tokens.GetNext()) {
2187 std::string category = tokens.token();
2188 // Ignore empty categories.
2189 if (category.empty())
2191 // Excluded categories start with '-'.
2192 if (category.at(0) == '-') {
2193 // Remove '-' from category string.
2194 category = category.substr(1);
2195 excluded_.push_back(category);
2196 } else if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
2197 TRACE_DISABLED_BY_DEFAULT("")) == 0) {
// Disabled-by-default categories get their own list so a plain "*"
// pattern does not enable them (see IsCategoryGroupEnabled).
2198 disabled_.push_back(category);
2200 included_.push_back(category);
// Appends |values| to |*out| as a comma-separated list; each entry is
// prefixed with '-' when |included| is false (excluded patterns). A
// leading comma is emitted when |*out| already has content.
// NOTE(review): the declaration and increment of |token_cnt|, and the
// StringList* out parameter line, are not visible in this extraction.
2205 void CategoryFilter::WriteString(const StringList& values,
2207 bool included) const {
2208 bool prepend_comma = !out->empty();
2210 for (StringList::const_iterator ci = values.begin();
2211 ci != values.end(); ++ci) {
2212 if (token_cnt > 0 || prepend_comma)
2213 StringAppendF(out, ",");
2214 StringAppendF(out, "%s%s", (included ? "" : "-"), ci->c_str());
// Serializes the filter back to the comma-separated string form accepted
// by the CategoryFilter(const std::string&) constructor: included and
// disabled patterns verbatim, excluded patterns prefixed with '-'.
2219 std::string CategoryFilter::ToString() const {
2220 std::string filter_string;
2221 WriteString(included_, &filter_string, true);
2222 WriteString(disabled_, &filter_string, true);
2223 WriteString(excluded_, &filter_string, false);
2224 return filter_string;
// Decides whether |category_group_name| is enabled under this filter.
// Evaluation order matters: disabled-by-default patterns first (so a
// bare "*" does not match them), then explicit includes, then excludes;
// with no include patterns at all, anything not excluded is enabled.
// NOTE(review): the return statements inside the three loops and after
// the disabled-* wildcard check are not visible in this extraction.
2227 bool CategoryFilter::IsCategoryGroupEnabled(
2228 const char* category_group_name) const {
2229 // TraceLog should call this method only as part of enabling/disabling
2231 StringList::const_iterator ci;
2233 // Check the disabled- filters and the disabled-* wildcard first so that a
2234 // "*" filter does not include the disabled.
2235 for (ci = disabled_.begin(); ci != disabled_.end(); ++ci) {
2236 if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
2239 if (DoesCategoryGroupContainCategory(category_group_name,
2240 TRACE_DISABLED_BY_DEFAULT("*")))
2243 for (ci = included_.begin(); ci != included_.end(); ++ci) {
2244 if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
2248 for (ci = excluded_.begin(); ci != excluded_.end(); ++ci) {
2249 if (DoesCategoryGroupContainCategory(category_group_name, ci->c_str()))
2252 // If the category group is not excluded, and there are no included patterns
2253 // we consider this pattern enabled.
2254 return included_.empty();
// True when the filter carries at least one explicitly included pattern.
2257 bool CategoryFilter::HasIncludedPatterns() const {
2258 return !included_.empty();
// Merges |nested_filter| into this filter: included patterns are kept
// only when both sides have some (otherwise one side meant "*"),
// while disabled and excluded patterns are always unioned.
2261 void CategoryFilter::Merge(const CategoryFilter& nested_filter) {
2262 // Keep included patterns only if both filters have an included entry.
2263 // Otherwise, one of the filter was specifying "*" and we want to honour the
2265 if (HasIncludedPatterns() && nested_filter.HasIncludedPatterns()) {
2266 included_.insert(included_.end(),
2267 nested_filter.included_.begin(),
2268 nested_filter.included_.end());
// Disabled and excluded lists are unioned unconditionally.
2273 disabled_.insert(disabled_.end(),
2274 nested_filter.disabled_.begin(),
2275 nested_filter.disabled_.end());
2276 excluded_.insert(excluded_.end(),
2277 nested_filter.excluded_.begin(),
2278 nested_filter.excluded_.end());
2281 void CategoryFilter::Clear() {
2287 } // namespace debug
2290 namespace trace_event_internal {
// Opens a TRACE_EVENT_PHASE_COMPLETE event via the binary-efficient
// path. A single function-local static atomic caches the
// category-enabled pointer, which is valid only while |category_group|
// is always the same string — hence the "gpu" DCHECK.
// NOTE(review): the assignment of the returned handle to event_handle_
// (read by the destructor) is not visible in this extraction.
2292 ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient(
2293 const char* category_group, const char* name) {
2294 // The single atom works because for now the category_group can only be "gpu".
2295 DCHECK(strcmp(category_group, "gpu") == 0);
2296 static TRACE_EVENT_API_ATOMIC_WORD atomic = 0;
2297 INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(
2298 category_group, atomic, category_group_enabled_);
2299 if (*category_group_enabled_) {
2301 TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
2302 TRACE_EVENT_PHASE_COMPLETE, category_group_enabled_, name,
2303 trace_event_internal::kNoEventId,
2304 static_cast<int>(base::PlatformThread::CurrentId()),
2305 base::TimeTicks::NowFromSystemTraceTime(),
2306 0, NULL, NULL, NULL, NULL, TRACE_EVENT_FLAG_NONE);
// Closes the COMPLETE event opened in the constructor by recording its
// duration, but only if the category was enabled at construction time.
2310 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
2311 if (*category_group_enabled_)
2312 TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(event_handle_);
2315 } // namespace trace_event_internal