perf tools: Unify page_size usage
tools/perf/util/hist.c
#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evsel.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he);

enum hist_filter {
        HIST_FILTER__DSO,
        HIST_FILTER__THREAD,
        HIST_FILTER__PARENT,
        HIST_FILTER__SYMBOL,
};
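
/*
 * Each hist_entry carries a "filtered" bitmask with one bit per filter
 * above; the entry is displayed only while the whole mask is zero.
 * Illustrative sketch (not code in this file):
 *
 *        he->filtered |= (1 << HIST_FILTER__DSO);     hidden by the DSO filter
 *        he->filtered &= ~(1 << HIST_FILTER__DSO);    that filter lifted
 *        shown = (he->filtered == 0);                 all filters must agree
 */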

struct callchain_param  callchain_param = {
        .mode   = CHAIN_GRAPH_REL,
        .min_percent = 0.5,
        .order  = ORDER_CALLEE,
        .key    = CCKEY_FUNCTION
};

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
        return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        if (len > hists__col_len(hists, col)) {
                hists__set_col_len(hists, col, len);
                return true;
        }
        return false;
}

void hists__reset_col_len(struct hists *hists)
{
        enum hist_column col;

        for (col = 0; col < HISTC_NR_COLS; ++col)
                hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

        if (hists__col_len(hists, dso) < unresolved_col_width &&
            !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
            !symbol_conf.dso_list)
                hists__set_col_len(hists, dso, unresolved_col_width);
}

void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
        int symlen;
        u16 len;

        /*
         * +4 accounts for '[x] ' priv level info
         * +2 accounts for 0x prefix on raw addresses
         * +3 accounts for ' y ' symtab origin info
         */
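        /*
         * Worked example (illustrative): on a 64-bit build with -v, a
         * resolved symbol named "do_page_fault" (namelen == 13) needs
         * 13 + 4 + 64/4 + 2 + 3 = 38 columns.
         */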
        if (h->ms.sym) {
                symlen = h->ms.sym->namelen + 4;
                if (verbose)
                        symlen += BITS_PER_LONG / 4 + 2 + 3;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_DSO);
        }

        len = thread__comm_len(h->thread);
        if (hists__new_col_len(hists, HISTC_COMM, len))
                hists__set_col_len(hists, HISTC_THREAD, len + 6);

        if (h->ms.map) {
                len = dso__name_len(h->ms.map->dso);
                hists__new_col_len(hists, HISTC_DSO, len);
        }

        if (h->parent)
                hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

        if (h->branch_info) {
                if (h->branch_info->from.sym) {
                        symlen = (int)h->branch_info->from.sym->namelen + 4;
                        if (verbose)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

                        symlen = dso__name_len(h->branch_info->from.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
                }

                if (h->branch_info->to.sym) {
                        symlen = (int)h->branch_info->to.sym->namelen + 4;
                        if (verbose)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

                        symlen = dso__name_len(h->branch_info->to.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_TO, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
                }
        }

        if (h->mem_info) {
                if (h->mem_info->daddr.sym) {
                        symlen = (int)h->mem_info->daddr.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                }
                if (h->mem_info->daddr.map) {
                        symlen = dso__name_len(h->mem_info->daddr.map->dso);
                        hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
                }
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
        }

        hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
        hists__new_col_len(hists, HISTC_MEM_TLB, 22);
        hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
        hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
        hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
        hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

        if (h->transaction)
                hists__new_col_len(hists, HISTC_TRANSACTION,
                                   hist_entry__transaction_len());
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;
        int row = 0;

        hists__reset_col_len(hists);

        while (next && row++ < max_rows) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (!n->filtered)
                        hists__calc_col_len(hists, n);
                next = rb_next(&n->rb_node);
        }
}

static void hist_entry__add_cpumode_period(struct hist_entry *he,
                                           unsigned int cpumode, u64 period)
{
        switch (cpumode) {
        case PERF_RECORD_MISC_KERNEL:
                he->stat.period_sys += period;
                break;
        case PERF_RECORD_MISC_USER:
                he->stat.period_us += period;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                he->stat.period_guest_sys += period;
                break;
        case PERF_RECORD_MISC_GUEST_USER:
                he->stat.period_guest_us += period;
                break;
        default:
                break;
        }
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
                                u64 weight)
{
        he_stat->period         += period;
        he_stat->weight         += weight;
        he_stat->nr_events      += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
        dest->period            += src->period;
        dest->period_sys        += src->period_sys;
        dest->period_us         += src->period_us;
        dest->period_guest_sys  += src->period_guest_sys;
        dest->period_guest_us   += src->period_guest_us;
        dest->nr_events         += src->nr_events;
        dest->weight            += src->weight;
}

static void hist_entry__decay(struct hist_entry *he)
{
        he->stat.period = (he->stat.period * 7) / 8;
        he->stat.nr_events = (he->stat.nr_events * 7) / 8;
        /* XXX need decay for weight too? */
}
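
/*
 * Decay math (illustrative): each call above scales the period by 7/8,
 * so after n rounds a period P is about P * (7/8)^n; e.g. 1000 decays
 * to 875, then 765, then 669, and the integer division drives it to
 * zero after roughly fifty rounds.
 */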

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
        u64 prev_period = he->stat.period;

        if (prev_period == 0)
                return true;

        hist_entry__decay(he);

        if (!he->filtered)
                hists->stats.total_period -= prev_period - he->stat.period;

        return he->stat.period == 0;
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);
                /*
                 * We may be annotating this, for instance, so keep it here in
                 * case it gets new samples; we'll eventually free it when the
                 * user stops browsing and it again gets fully decayed.
                 */
                if (((zap_user && n->level == '.') ||
                     (zap_kernel && n->level != '.') ||
                     hists__decay_entry(hists, n)) &&
                    !n->used) {
                        rb_erase(&n->rb_node, &hists->entries);

                        if (sort__need_collapse)
                                rb_erase(&n->rb_node_in, &hists->entries_collapsed);

                        hist_entry__free(n);
                        --hists->nr_entries;
                }
        }
}

/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
        size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
        struct hist_entry *he = zalloc(sizeof(*he) + callchain_size);

        if (he != NULL) {
                *he = *template;

                if (he->ms.map)
                        he->ms.map->referenced = true;

                if (he->branch_info) {
                        /*
                         * This branch info is (part of) an array allocated
                         * by machine__resolve_bstack() and will be freed
                         * after the new entries are added, so save a copy.
                         */
                        he->branch_info = malloc(sizeof(*he->branch_info));
                        if (he->branch_info == NULL) {
                                free(he);
                                return NULL;
                        }

                        memcpy(he->branch_info, template->branch_info,
                               sizeof(*he->branch_info));

                        if (he->branch_info->from.map)
                                he->branch_info->from.map->referenced = true;
                        if (he->branch_info->to.map)
                                he->branch_info->to.map->referenced = true;
                }

                if (he->mem_info) {
                        if (he->mem_info->iaddr.map)
                                he->mem_info->iaddr.map->referenced = true;
                        if (he->mem_info->daddr.map)
                                he->mem_info->daddr.map->referenced = true;
                }

                if (symbol_conf.use_callchain)
                        callchain_init(he->callchain);

                INIT_LIST_HEAD(&he->pairs.node);
        }

        return he;
}
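
/*
 * Note on the allocation in hist_entry__new() (illustrative; assumes
 * 'callchain' is the zero-length trailing member of struct hist_entry,
 * as declared in sort.h):
 *
 *        +-------------------+------------------------+
 *        | struct hist_entry | struct callchain_root  |
 *        +-------------------+------------------------+
 *        ^ he                 ^ he->callchain
 *
 * A single zalloc() (and later a single free()) covers both, and the
 * tail is simply absent when callchains are not in use.
 */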

void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
        if (!h->filtered) {
                hists__calc_col_len(hists, h);
                ++hists->nr_entries;
                hists->stats.total_period += h->stat.period;
        }
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
        if (symbol_conf.exclude_other && parent == NULL)
                return 1 << HIST_FILTER__PARENT;
        return 0;
}

static struct hist_entry *add_hist_entry(struct hists *hists,
                                         struct hist_entry *entry,
                                         struct addr_location *al,
                                         u64 period,
                                         u64 weight)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;

        p = &hists->entries_in->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                /*
                 * Make sure it receives arguments in the same order as
                 * hist_entry__collapse(), so that we can use the
                 * appropriate function when searching for an entry
                 * regardless of which sort keys were used.
                 */
                cmp = hist_entry__cmp(he, entry);

                if (!cmp) {
                        he_stat__add_period(&he->stat, period, weight);

                        /*
                         * This mem info was allocated from machine__resolve_mem
                         * and will not be used anymore.
                         */
                        free(entry->mem_info);

                        /*
                         * If the map of an existing hist_entry has
                         * become out-of-date due to an exec() or
                         * similar, update it.  Otherwise we will
                         * mis-adjust symbol addresses when computing
                         * the history counter to increment.
                         */
                        if (he->ms.map != entry->ms.map) {
                                he->ms.map = entry->ms.map;
                                if (he->ms.map)
                                        he->ms.map->referenced = true;
                        }
                        goto out;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(entry);
        if (!he)
                return NULL;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
        hist_entry__add_cpumode_period(he, al->cpumode, period);
        return he;
}

struct hist_entry *__hists__add_mem_entry(struct hists *self,
                                          struct addr_location *al,
                                          struct symbol *sym_parent,
                                          struct mem_info *mi,
                                          u64 period,
                                          u64 weight)
{
        struct hist_entry entry = {
                .thread = al->thread,
                .ms = {
                        .map    = al->map,
                        .sym    = al->sym,
                },
                .stat = {
                        .period = period,
                        .weight = weight,
                        .nr_events = 1,
                },
                .cpu    = al->cpu,
                .ip     = al->addr,
                .level  = al->level,
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent),
                .hists = self,
                .mem_info = mi,
                .branch_info = NULL,
        };
        return add_hist_entry(self, &entry, al, period, weight);
}

struct hist_entry *__hists__add_branch_entry(struct hists *self,
                                             struct addr_location *al,
                                             struct symbol *sym_parent,
                                             struct branch_info *bi,
                                             u64 period,
                                             u64 weight)
{
        struct hist_entry entry = {
                .thread = al->thread,
                .ms = {
                        .map    = bi->to.map,
                        .sym    = bi->to.sym,
                },
                .cpu    = al->cpu,
                .ip     = bi->to.addr,
                .level  = al->level,
                .stat = {
                        .period = period,
                        .nr_events = 1,
                        .weight = weight,
                },
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent),
                .branch_info = bi,
                .hists  = self,
                .mem_info = NULL,
        };

        return add_hist_entry(self, &entry, al, period, weight);
}

struct hist_entry *__hists__add_entry(struct hists *self,
                                      struct addr_location *al,
                                      struct symbol *sym_parent, u64 period,
                                      u64 weight, u64 transaction)
{
        struct hist_entry entry = {
                .thread = al->thread,
                .ms = {
                        .map    = al->map,
                        .sym    = al->sym,
                },
                .cpu    = al->cpu,
                .ip     = al->addr,
                .level  = al->level,
                .stat = {
                        .period = period,
                        .nr_events = 1,
                        .weight = weight,
                },
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent),
                .hists  = self,
                .branch_info = NULL,
                .mem_info = NULL,
                .transaction = transaction,
        };

        return add_hist_entry(self, &entry, al, period, weight);
}
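
/*
 * Typical caller (sketch only, simplified from the builtin-report style
 * of use; error handling and callchain appending omitted):
 *
 *        struct hist_entry *he;
 *
 *        he = __hists__add_entry(&evsel->hists, &al, parent,
 *                                sample->period, sample->weight,
 *                                sample->transaction);
 *        if (he == NULL)
 *                return -ENOMEM;
 */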

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
        struct sort_entry *se;
        int64_t cmp = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                cmp = se->se_cmp(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
        struct sort_entry *se;
        int64_t cmp = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                int64_t (*f)(struct hist_entry *, struct hist_entry *);

                f = se->se_collapse ?: se->se_cmp;

                cmp = f(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}
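
/*
 * The "?:" above is the GNU C elvis operator: it evaluates to
 * se->se_collapse when that is non-NULL and to se->se_cmp otherwise,
 * so a sort key may provide a dedicated collapse comparison and fall
 * back to its ordinary comparison if it doesn't.
 */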

void hist_entry__free(struct hist_entry *he)
{
        free(he->branch_info);
        free(he->mem_info);
        free(he);
}

/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
                                         struct rb_root *root,
                                         struct hist_entry *he)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        int64_t cmp;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(iter, he);

                if (!cmp) {
                        he_stat__add_stat(&iter->stat, &he->stat);

                        if (symbol_conf.use_callchain) {
                                callchain_cursor_reset(&callchain_cursor);
                                callchain_merge(&callchain_cursor,
                                                iter->callchain,
                                                he->callchain);
                        }
                        hist_entry__free(he);
                        return false;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, root);
        return true;
}

static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
        struct rb_root *root;

        pthread_mutex_lock(&hists->lock);

        root = hists->entries_in;
        if (++hists->entries_in > &hists->entries_in_array[1])
                hists->entries_in = &hists->entries_in_array[0];

        pthread_mutex_unlock(&hists->lock);

        return root;
}
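
/*
 * entries_in double buffering (illustrative): hists->entries_in always
 * points at one of the two trees in hists->entries_in_array[].  Rotating
 * under the lock lets 'perf top' keep inserting new entries into one
 * tree while the tree just handed back is collapsed and resorted:
 *
 *        entries_in_array[0]  <- new samples land here ...
 *        entries_in_array[1]  <- ... while this one is being collapsed
 */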

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
        hists__filter_entry_by_dso(hists, he);
        hists__filter_entry_by_thread(hists, he);
        hists__filter_entry_by_symbol(hists, he);
}

void hists__collapse_resort(struct hists *hists)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;

        if (!sort__need_collapse)
                return;

        root = hists__get_rotate_entries_in(hists);
        next = rb_first(root);

        while (next) {
                if (session_done())
                        break;
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                rb_erase(&n->rb_node_in, root);
                if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
                        /*
                         * If it wasn't combined with one of the entries already
                         * collapsed, we need to apply the filters that may have
                         * been set by, say, the hist_browser.
                         */
                        hists__apply_filters(hists, n);
                }
        }
}

/*
 * reverse the map, sort on period.
 */

static int period_cmp(u64 period_a, u64 period_b)
{
        if (period_a > period_b)
                return 1;
        if (period_a < period_b)
                return -1;
        return 0;
}

static int hist_entry__sort_on_period(struct hist_entry *a,
                                      struct hist_entry *b)
{
        int ret;
        int i, nr_members;
        struct perf_evsel *evsel;
        struct hist_entry *pair;
        u64 *periods_a, *periods_b;

        ret = period_cmp(a->stat.period, b->stat.period);
        if (ret || !symbol_conf.event_group)
                return ret;

        evsel = hists_to_evsel(a->hists);
        nr_members = evsel->nr_members;
        if (nr_members <= 1)
                return ret;
        periods_a = zalloc(sizeof(*periods_a) * nr_members);
        periods_b = zalloc(sizeof(*periods_b) * nr_members);

        if (!periods_a || !periods_b)
                goto out;

        list_for_each_entry(pair, &a->pairs.head, pairs.node) {
                evsel = hists_to_evsel(pair->hists);
                periods_a[perf_evsel__group_idx(evsel)] = pair->stat.period;
        }

        list_for_each_entry(pair, &b->pairs.head, pairs.node) {
                evsel = hists_to_evsel(pair->hists);
                periods_b[perf_evsel__group_idx(evsel)] = pair->stat.period;
        }

        for (i = 1; i < nr_members; i++) {
                ret = period_cmp(periods_a[i], periods_b[i]);
                if (ret)
                        break;
        }

out:
        free(periods_a);
        free(periods_b);

        return ret;
}
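
/*
 * Example (illustrative): with 'perf report --group' on an event group
 * {cycles,instructions}, two entries whose cycles periods tie are then
 * ordered by the paired instructions periods gathered above, member by
 * member, starting at index 1 (index 0 is the group leader).
 */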

static void __hists__insert_output_entry(struct rb_root *entries,
                                         struct hist_entry *he,
                                         u64 min_callchain_hits)
{
        struct rb_node **p = &entries->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;

        if (symbol_conf.use_callchain)
                callchain_param.sort(&he->sorted_chain, he->callchain,
                                     min_callchain_hits, &callchain_param);

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (hist_entry__sort_on_period(he, iter) > 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, entries);
}

void hists__output_resort(struct hists *hists)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;
        u64 min_callchain_hits;

        min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

        if (sort__need_collapse)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        next = rb_first(root);
        hists->entries = RB_ROOT;

        hists->nr_entries = 0;
        hists->stats.total_period = 0;
        hists__reset_col_len(hists);

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                __hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
                hists__inc_nr_entries(hists, n);
        }
}
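
/*
 * Putting it together (sketch of the usual pipeline at the call sites,
 * simplified):
 *
 *        hists__collapse_resort(hists);  merge entries that compare equal
 *        hists__output_resort(hists);    re-sort the result on period
 *        hists__fprintf(hists, ...);     then print, or hand to the browser
 */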

static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
                                       enum hist_filter filter)
{
        h->filtered &= ~(1 << filter);
        if (h->filtered)
                return;

        ++hists->nr_entries;
        if (h->ms.unfolded)
                hists->nr_entries += h->nr_rows;
        h->row_offset = 0;
        hists->stats.total_period += h->stat.period;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;

        hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he)
{
        if (hists->dso_filter != NULL &&
            (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
                he->filtered |= (1 << HIST_FILTER__DSO);
                return true;
        }

        return false;
}

void hists__filter_by_dso(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (symbol_conf.exclude_other && !h->parent)
                        continue;

                if (hists__filter_entry_by_dso(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
        }
}

static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->thread_filter != NULL &&
            he->thread != hists->thread_filter) {
                he->filtered |= (1 << HIST_FILTER__THREAD);
                return true;
        }

        return false;
}

void hists__filter_by_thread(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_thread(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
        }
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->symbol_filter_str != NULL &&
            (!he->ms.sym || strstr(he->ms.sym->name,
                                   hists->symbol_filter_str) == NULL)) {
                he->filtered |= (1 << HIST_FILTER__SYMBOL);
                return true;
        }

        return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_symbol(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
        }
}
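
/*
 * Usage sketch (illustrative, in the style of the TUI browser): set the
 * filter string on the hists and rescan; non-matching entries get their
 * HIST_FILTER__SYMBOL bit set and drop out of the totals:
 *
 *        hists->symbol_filter_str = "page_fault";
 *        hists__filter_by_symbol(hists);
 */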

int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
        return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
        return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
        ++stats->nr_events[0];
        ++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
        events_stats__inc(&hists->stats, type);
}

static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
                                                 struct hist_entry *pair)
{
        struct rb_root *root;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;

        if (sort__need_collapse)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        p = &root->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(he, pair);

                if (!cmp)
                        goto out;

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(pair);
        if (he) {
                memset(&he->stat, 0, sizeof(he->stat));
                he->hists = hists;
                rb_link_node(&he->rb_node_in, parent, p);
                rb_insert_color(&he->rb_node_in, root);
                hists__inc_nr_entries(hists, he);
                he->dummy = true;
        }
out:
        return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
                                            struct hist_entry *he)
{
        struct rb_node *n;

        if (sort__need_collapse)
                n = hists->entries_collapsed.rb_node;
        else
                n = hists->entries_in->rb_node;

        while (n) {
                struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
                int64_t cmp = hist_entry__collapse(iter, he);

                if (cmp < 0)
                        n = n->rb_left;
                else if (cmp > 0)
                        n = n->rb_right;
                else
                        return iter;
        }

        return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
        struct rb_root *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (sort__need_collapse)
                root = &leader->entries_collapsed;
        else
                root = leader->entries_in;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                pos  = rb_entry(nd, struct hist_entry, rb_node_in);
                pair = hists__find_entry(other, pos);

                if (pair)
                        hist_entry__add_pair(pair, pos);
        }
}

/*
 * Look for entries in the other hists that are not present in the leader; if
 * we find any, add a dummy entry on the leader hists, with period=0 and
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
        struct rb_root *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (sort__need_collapse)
                root = &other->entries_collapsed;
        else
                root = other->entries_in;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                pos = rb_entry(nd, struct hist_entry, rb_node_in);

                if (!hist_entry__has_pairs(pos)) {
                        pair = hists__add_dummy_entry(leader, pos);
                        if (pair == NULL)
                                return -1;
                        hist_entry__add_pair(pos, pair);
                }
        }

        return 0;
}
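
/*
 * hists__match()/hists__link() usage (sketch, in the style of
 * 'perf diff': pair each entry in a baseline hists with its
 * counterpart from another session):
 *
 *        hists__match(leader, other);    link entries present in both
 *        hists__link(leader, other);     add dummies for entries found
 *                                        only in 'other', so rows line up
 */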