- add sources.
[platform/framework/web/crosswalk.git] / src / base / metrics / statistics_recorder.cc
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/metrics/statistics_recorder.h"
6
7 #include "base/at_exit.h"
8 #include "base/debug/leak_annotations.h"
9 #include "base/logging.h"
10 #include "base/memory/scoped_ptr.h"
11 #include "base/metrics/histogram.h"
12 #include "base/strings/stringprintf.h"
13 #include "base/synchronization/lock.h"
14
15 using std::list;
16 using std::string;
17
namespace {
// Initialize histogram statistics gathering system.
// Declared Leaky so the recorder is never destroyed at process exit; the
// histogram registry must stay valid for the entire life of the process.
base::LazyInstance<base::StatisticsRecorder>::Leaky g_statistics_recorder_ =
    LAZY_INSTANCE_INITIALIZER;
}  // namespace
23
24 namespace base {
25
// static
void StatisticsRecorder::Initialize() {
  // Ensure that an instance of the StatisticsRecorder object is created.
  // Get() constructs the LazyInstance on first use; since it is Leaky, the
  // recorder is never torn down afterwards.
  g_statistics_recorder_.Get();
}
31
32
33 // static
34 bool StatisticsRecorder::IsActive() {
35   if (lock_ == NULL)
36     return false;
37   base::AutoLock auto_lock(*lock_);
38   return NULL != histograms_;
39 }
40
41 // static
42 HistogramBase* StatisticsRecorder::RegisterOrDeleteDuplicate(
43     HistogramBase* histogram) {
44   // As per crbug.com/79322 the histograms are intentionally leaked, so we need
45   // to annotate them. Because ANNOTATE_LEAKING_OBJECT_PTR may be used only once
46   // for an object, the duplicates should not be annotated.
47   // Callers are responsible for not calling RegisterOrDeleteDuplicate(ptr)
48   // twice if (lock_ == NULL) || (!histograms_).
49   if (lock_ == NULL) {
50     ANNOTATE_LEAKING_OBJECT_PTR(histogram);  // see crbug.com/79322
51     return histogram;
52   }
53
54   HistogramBase* histogram_to_delete = NULL;
55   HistogramBase* histogram_to_return = NULL;
56   {
57     base::AutoLock auto_lock(*lock_);
58     if (histograms_ == NULL) {
59       histogram_to_return = histogram;
60     } else {
61       const string& name = histogram->histogram_name();
62       HistogramMap::iterator it = histograms_->find(name);
63       if (histograms_->end() == it) {
64         (*histograms_)[name] = histogram;
65         ANNOTATE_LEAKING_OBJECT_PTR(histogram);  // see crbug.com/79322
66         histogram_to_return = histogram;
67       } else if (histogram == it->second) {
68         // The histogram was registered before.
69         histogram_to_return = histogram;
70       } else {
71         // We already have one histogram with this name.
72         histogram_to_return = it->second;
73         histogram_to_delete = histogram;
74       }
75     }
76   }
77   delete histogram_to_delete;
78   return histogram_to_return;
79 }
80
// static
const BucketRanges* StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
    const BucketRanges* ranges) {
  DCHECK(ranges->HasValidChecksum());
  // NOTE: |ranges_deleter| is deliberately declared *before* |auto_lock|
  // below, so that if |ranges| turns out to be a duplicate it is deleted only
  // after the lock has been released.
  scoped_ptr<const BucketRanges> ranges_deleter;

  // Before the recorder exists (or after it is gone) there is no registry;
  // intentionally leak the object (same rationale as crbug.com/79322).
  if (lock_ == NULL) {
    ANNOTATE_LEAKING_OBJECT_PTR(ranges);
    return ranges;
  }

  base::AutoLock auto_lock(*lock_);
  if (ranges_ == NULL) {
    ANNOTATE_LEAKING_OBJECT_PTR(ranges);
    return ranges;
  }

  // |ranges_| maps a checksum to the list of registered BucketRanges sharing
  // that checksum; look up the list for this checksum, creating it if needed.
  list<const BucketRanges*>* checksum_matching_list;
  RangesMap::iterator ranges_it = ranges_->find(ranges->checksum());
  if (ranges_->end() == ranges_it) {
    // Add a new matching list to map.
    checksum_matching_list = new list<const BucketRanges*>();
    ANNOTATE_LEAKING_OBJECT_PTR(checksum_matching_list);
    (*ranges_)[ranges->checksum()] = checksum_matching_list;
  } else {
    checksum_matching_list = ranges_it->second;
  }

  // Scan the checksum bucket for an entry with identical bucket boundaries
  // (checksums can collide, hence the Equals() check).
  list<const BucketRanges*>::iterator checksum_matching_list_it;
  for (checksum_matching_list_it = checksum_matching_list->begin();
       checksum_matching_list_it != checksum_matching_list->end();
       ++checksum_matching_list_it) {
    const BucketRanges* existing_ranges = *checksum_matching_list_it;
    if (existing_ranges->Equals(ranges)) {
      if (existing_ranges == ranges) {
        // The exact same object was registered before; nothing to do.
        return ranges;
      } else {
        // An equivalent BucketRanges is already registered: schedule |ranges|
        // for deletion (after unlock) and return the canonical instance.
        ranges_deleter.reset(ranges);
        return existing_ranges;
      }
    }
  }
  // We haven't found a BucketRanges which has the same ranges. Register the
  // new BucketRanges.
  checksum_matching_list->push_front(ranges);
  return ranges;
}
128
129 // static
130 void StatisticsRecorder::WriteHTMLGraph(const std::string& query,
131                                         std::string* output) {
132   if (!IsActive())
133     return;
134
135   Histograms snapshot;
136   GetSnapshot(query, &snapshot);
137   for (Histograms::iterator it = snapshot.begin();
138        it != snapshot.end();
139        ++it) {
140     (*it)->WriteHTMLGraph(output);
141     output->append("<br><hr><br>");
142   }
143 }
144
145 // static
146 void StatisticsRecorder::WriteGraph(const std::string& query,
147                                     std::string* output) {
148   if (!IsActive())
149     return;
150   if (query.length())
151     StringAppendF(output, "Collections of histograms for %s\n", query.c_str());
152   else
153     output->append("Collections of all histograms\n");
154
155   Histograms snapshot;
156   GetSnapshot(query, &snapshot);
157   for (Histograms::iterator it = snapshot.begin();
158        it != snapshot.end();
159        ++it) {
160     (*it)->WriteAscii(output);
161     output->append("\n");
162   }
163 }
164
165 // static
166 void StatisticsRecorder::GetHistograms(Histograms* output) {
167   if (lock_ == NULL)
168     return;
169   base::AutoLock auto_lock(*lock_);
170   if (histograms_ == NULL)
171     return;
172
173   for (HistogramMap::iterator it = histograms_->begin();
174        histograms_->end() != it;
175        ++it) {
176     DCHECK_EQ(it->first, it->second->histogram_name());
177     output->push_back(it->second);
178   }
179 }
180
181 // static
182 void StatisticsRecorder::GetBucketRanges(
183     std::vector<const BucketRanges*>* output) {
184   if (lock_ == NULL)
185     return;
186   base::AutoLock auto_lock(*lock_);
187   if (ranges_ == NULL)
188     return;
189
190   for (RangesMap::iterator it = ranges_->begin();
191        ranges_->end() != it;
192        ++it) {
193     list<const BucketRanges*>* ranges_list = it->second;
194     list<const BucketRanges*>::iterator ranges_list_it;
195     for (ranges_list_it = ranges_list->begin();
196          ranges_list_it != ranges_list->end();
197          ++ranges_list_it) {
198       output->push_back(*ranges_list_it);
199     }
200   }
201 }
202
203 // static
204 HistogramBase* StatisticsRecorder::FindHistogram(const std::string& name) {
205   if (lock_ == NULL)
206     return NULL;
207   base::AutoLock auto_lock(*lock_);
208   if (histograms_ == NULL)
209     return NULL;
210
211   HistogramMap::iterator it = histograms_->find(name);
212   if (histograms_->end() == it)
213     return NULL;
214   return it->second;
215 }
216
217 // private static
218 void StatisticsRecorder::GetSnapshot(const std::string& query,
219                                      Histograms* snapshot) {
220   if (lock_ == NULL)
221     return;
222   base::AutoLock auto_lock(*lock_);
223   if (histograms_ == NULL)
224     return;
225
226   for (HistogramMap::iterator it = histograms_->begin();
227        histograms_->end() != it;
228        ++it) {
229     if (it->first.find(query) != std::string::npos)
230       snapshot->push_back(it->second);
231   }
232 }
233
// This singleton instance should be started during the single threaded portion
// of main(), and hence it is not thread safe.  It initializes globals to
// provide support for all future calls.
StatisticsRecorder::StatisticsRecorder() {
  DCHECK(!histograms_);
  if (lock_ == NULL) {
    // This will leak on purpose. It's the only way to make sure we won't race
    // against the static uninitialization of the module while one of our
    // static methods relying on the lock get called at an inappropriate time
    // during the termination phase. Since it's a static data member, we will
    // leak one per process, which would be similar to the instance allocated
    // during static initialization and released only on process termination.
    lock_ = new base::Lock;
  }
  base::AutoLock auto_lock(*lock_);
  histograms_ = new HistogramMap;
  ranges_ = new RangesMap;

  // With verbose logging enabled, dump all registered histograms at process
  // exit so their data is not silently lost.
  if (VLOG_IS_ON(1))
    AtExitManager::RegisterCallback(&DumpHistogramsToVlog, this);
}
255
256 // static
257 void StatisticsRecorder::DumpHistogramsToVlog(void* instance) {
258   DCHECK(VLOG_IS_ON(1));
259
260   StatisticsRecorder* me = reinterpret_cast<StatisticsRecorder*>(instance);
261   string output;
262   me->WriteGraph(std::string(), &output);
263   VLOG(1) << output;
264 }
265
StatisticsRecorder::~StatisticsRecorder() {
  DCHECK(histograms_ && ranges_ && lock_);

  // Clean up.
  // NOTE: the deleters are declared *before* the lock scope below so that the
  // maps are actually destroyed only after the lock has been released.
  scoped_ptr<HistogramMap> histograms_deleter;
  scoped_ptr<RangesMap> ranges_deleter;
  // We don't delete lock_ on purpose to avoid having to properly protect
  // against it going away after we checked for NULL in the static methods.
  {
    base::AutoLock auto_lock(*lock_);
    histograms_deleter.reset(histograms_);
    ranges_deleter.reset(ranges_);
    histograms_ = NULL;
    ranges_ = NULL;
  }
  // We are going to leak the histograms and the ranges (only the maps that
  // pointed at them are deleted above).
}
283
284
// Registry of histograms keyed by name; NULL before the recorder is
// constructed and again after it is destroyed.
// static
StatisticsRecorder::HistogramMap* StatisticsRecorder::histograms_ = NULL;
// Registry of BucketRanges keyed by checksum; same lifetime as |histograms_|.
// static
StatisticsRecorder::RangesMap* StatisticsRecorder::ranges_ = NULL;
// Guards |histograms_| and |ranges_|. Allocated lazily and intentionally
// leaked (see the constructor) so the static methods stay safe at shutdown.
// static
base::Lock* StatisticsRecorder::lock_ = NULL;
291
292 }  // namespace base