perf diff: Use internal rb tree for compute resort
author Namhyung Kim <namhyung.kim@lge.com>
Mon, 10 Dec 2012 08:29:56 +0000 (17:29 +0900)
committer Arnaldo Carvalho de Melo <acme@redhat.com>
Thu, 24 Jan 2013 19:40:06 +0000 (16:40 -0300)
There's no reason to run hists__compute_resort() on the output tree.
Convert it to use the internal tree (hists->entries_in or
hists->entries_collapsed) so that the unnecessary hists__output_resort()
call can be dropped from the compute path.
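
For reference, this follows the pattern hists.c already uses for its other
trees: every entry embeds two rb_node fields, one (rb_node_in) linking it
into the input/collapsed tree and one (rb_node) linking it into the output
tree, so re-sorting only re-links nodes and never needs rb_erase(). Below is
a minimal sketch of that two-node re-sort pattern, assuming the kernel's
<linux/rbtree.h> helpers; the item/weight names are illustrative only and do
not appear in the perf sources.

#include <linux/rbtree.h>

struct item {
	struct rb_node		node_in;	/* links the item into the input tree  */
	struct rb_node		node;		/* links the item into the output tree */
	unsigned long long	key;		/* key the input tree is sorted by     */
	unsigned long long	weight;		/* key the output tree is sorted by    */
};

/* Insert into the output tree, biggest weight first (illustrative order). */
static void insert_by_weight(struct rb_root *root, struct item *it)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct item *cur = rb_entry(*p, struct item, node);

		parent = *p;
		if (it->weight > cur->weight)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&it->node, parent, p);
	rb_insert_color(&it->node, root);
}

/*
 * Walk the input tree through node_in and re-link every item into a fresh
 * output tree through node.  The input tree is left untouched, which is why
 * hists__compute_resort() no longer needs the rb_erase() it used to do on
 * the output tree.
 */
static void resort_by_weight(struct rb_root *in, struct rb_root *out)
{
	struct rb_node *next = rb_first(in);

	*out = RB_ROOT;
	while (next) {
		struct item *it = rb_entry(next, struct item, node_in);

		next = rb_next(&it->node_in);
		insert_by_weight(out, it);
	}
}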

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Jiri Olsa <jolsa@redhat.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/r/1355128197-18193-4-git-send-email-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
tools/perf/builtin-diff.c
tools/perf/util/hist.c
tools/perf/util/hist.h

diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 8b896f5..4af0b58 100644
@@ -414,19 +414,30 @@ static void insert_hist_entry_by_compute(struct rb_root *root,
 
 static void hists__compute_resort(struct hists *hists)
 {
-       struct rb_root tmp = RB_ROOT;
-       struct rb_node *next = rb_first(&hists->entries);
+       struct rb_root *root;
+       struct rb_node *next;
+
+       if (sort__need_collapse)
+               root = &hists->entries_collapsed;
+       else
+               root = hists->entries_in;
+
+       hists->entries = RB_ROOT;
+       next = rb_first(root);
+
+       hists->nr_entries = 0;
+       hists->stats.total_period = 0;
+       hists__reset_col_len(hists);
 
        while (next != NULL) {
-               struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node);
+               struct hist_entry *he;
 
-               next = rb_next(&he->rb_node);
+               he = rb_entry(next, struct hist_entry, rb_node_in);
+               next = rb_next(&he->rb_node_in);
 
-               rb_erase(&he->rb_node, &hists->entries);
-               insert_hist_entry_by_compute(&tmp, he, compute);
+               insert_hist_entry_by_compute(&hists->entries, he, compute);
+               hists__inc_nr_entries(hists, he);
        }
-
-       hists->entries = tmp;
 }
 
 static void hists__process(struct hists *old, struct hists *new)
@@ -438,11 +449,11 @@ static void hists__process(struct hists *old, struct hists *new)
        else
                hists__link(new, old);
 
-       hists__output_resort(new);
-
        if (sort_compute) {
                hists__precompute(new);
                hists__compute_resort(new);
+       } else {
+               hists__output_resort(new);
        }
 
        hists__fprintf(new, true, 0, 0, stdout);
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 8ff3c2f..37179af 100644
@@ -251,7 +251,7 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template)
        return he;
 }
 
-static void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
+void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
 {
        if (!h->filtered) {
                hists__calc_col_len(hists, h);
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 5b3b007..d3664ab 100644
@@ -96,6 +96,7 @@ void hists__decay_entries_threaded(struct hists *hists, bool zap_user,
                                   bool zap_kernel);
 void hists__output_recalc_col_len(struct hists *hists, int max_rows);
 
+void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h);
 void hists__inc_nr_events(struct hists *self, u32 type);
 size_t hists__fprintf_nr_events(struct hists *self, FILE *fp);