return -1;
}
- session->hists.stats.total += data.period;
+ session->hists.stats.total_period += data.period;
return 0;
}
if (!hists)
return -ENOMEM;
- hists->stats.total += data->period;
+ hists->stats.total_period += data->period;
/*
* FIXME: add_event_total should be moved from here to
* perf_session__process_event so that the proper hist is passed to
* the event_op methods.
*/
hists__inc_nr_events(hists, PERF_RECORD_SAMPLE);
- session->hists.stats.total += data->period;
+ session->hists.stats.total_period += data->period;
return 0;
}
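For illustration only, a self-contained sketch of the direction the FIXME above points at (all names hypothetical, not perf's actual API): the session dispatcher would resolve the proper hists once and do the period accounting centrally, so individual tools' .sample callbacks no longer reach into session->hists themselves.

/* Hypothetical, self-contained illustration of the FIXME's direction:
 * the dispatcher updates the totals on the proper hists before calling
 * the tool callback, instead of each tool duplicating the accounting. */
#include <stdio.h>

struct stats { unsigned long long total_period; };
struct hists { struct stats stats; };

typedef int (*sample_fn)(struct hists *hists, unsigned long long period);

static int dispatch_sample(struct hists *hists, unsigned long long period,
			   sample_fn tool_cb)
{
	hists->stats.total_period += period;	/* central accounting */
	return tool_cb(hists, period);		/* tool gets the right hists */
}

static int tool_sample(struct hists *hists, unsigned long long period)
{
	printf("sample: period=%llu total=%llu\n",
	       period, hists->stats.total_period);
	return 0;
}

int main(void)
{
	struct hists h = { { 0 } };
	dispatch_sample(&h, 100, tool_sample);
	dispatch_sample(&h, 250, tool_sample);
	return 0;
}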
if (rb_first(&session->hists.entries) ==
rb_last(&session->hists.entries))
fprintf(stdout, "# Samples: %Ld\n#\n",
- hists->stats.total);
+ hists->stats.total_period);
else
fprintf(stdout, "# Samples: %Ld %s\n#\n",
- hists->stats.total,
+ hists->stats.total_period,
__event_name(hists->type, hists->config));
hists__fprintf(hists, NULL, false, stdout);
return 0;
}
-static int process_lost_event(event_t *event __used,
- struct perf_session *session __used)
-{
- nr_lost_chunks++;
- nr_lost_events += event->lost.lost;
-
- return 0;
-}
-
static struct perf_event_ops event_ops = {
.sample = process_sample_event,
.comm = event__process_comm,
- .lost = process_lost_event,
+ .lost = event__process_lost,
.ordered_samples = true,
};
if (session == NULL)
return -ENOMEM;
- if (perf_session__has_traces(session, "record -R"))
+ if (perf_session__has_traces(session, "record -R")) {
err = perf_session__process_events(session, &event_ops);
+ nr_events = session->hists.stats.nr_events[0];
+ nr_lost_events = session->hists.stats.total_lost;
+ nr_lost_chunks = session->hists.stats.nr_events[PERF_RECORD_LOST];
+ }
perf_session__delete(session);
return err;
data.time, thread->comm);
}
- session->hists.stats.total += data.period;
+ session->hists.stats.total_period += data.period;
return 0;
}
int event__process_lost(event_t *self, struct perf_session *session)
{
dump_printf(": id:%Ld: lost:%Ld\n", self->lost.id, self->lost.lost);
- session->hists.stats.lost += self->lost.lost;
+ session->hists.stats.total_lost += self->lost.lost;
return 0;
}
struct hist_entry *n;
u64 min_callchain_hits;
- min_callchain_hits = self->stats.total * (callchain_param.min_percent / 100);
+ min_callchain_hits = self->stats.total_period * (callchain_param.min_percent / 100);
tmp = RB_ROOT;
next = rb_first(&self->entries);
if (pair_hists) {
count = self->pair ? self->pair->count : 0;
- total = pair_hists->stats.total;
+ total = pair_hists->stats.total_period;
count_sys = self->pair ? self->pair->count_sys : 0;
count_us = self->pair ? self->pair->count_us : 0;
count_guest_sys = self->pair ? self->pair->count_guest_sys : 0;
++position;
}
ret += hist_entry__fprintf(h, pair, show_displacement,
- displacement, fp, self->stats.total);
+ displacement, fp, self->stats.total_period);
if (symbol_conf.use_callchain)
- ret += hist_entry__fprintf_callchain(h, fp, self->stats.total);
+ ret += hist_entry__fprintf_callchain(h, fp, self->stats.total_period);
if (h->ms.map == NULL && verbose > 1) {
__map_groups__fprintf_maps(&h->thread->mg,
{
struct rb_node *nd;
- self->nr_entries = self->stats.total = 0;
+ self->nr_entries = self->stats.total_period = 0;
self->max_sym_namelen = 0;
for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
h->filtered &= ~(1 << HIST_FILTER__DSO);
if (!h->filtered) {
++self->nr_entries;
- self->stats.total += h->count;
+ self->stats.total_period += h->count;
if (h->ms.sym &&
self->max_sym_namelen < h->ms.sym->namelen)
self->max_sym_namelen = h->ms.sym->namelen;
{
struct rb_node *nd;
- self->nr_entries = self->stats.total = 0;
+ self->nr_entries = self->stats.total_period = 0;
self->max_sym_namelen = 0;
for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
h->filtered &= ~(1 << HIST_FILTER__THREAD);
if (!h->filtered) {
++self->nr_entries;
- self->stats.total += h->count;
+ self->stats.total_period += h->count;
if (h->ms.sym &&
self->max_sym_namelen < h->ms.sym->namelen)
self->max_sym_namelen = h->ms.sym->namelen;
void hists__inc_nr_events(struct hists *self, u32 type)
{
- ++self->hists.stats.nr_events[0];
- ++self->hists.stats.nr_events[type];
+ ++self->stats.nr_events[0];
+ ++self->stats.nr_events[type];
}
size_t hists__fprintf_nr_events(struct hists *self, FILE *fp)
struct sym_ext *ext;
};
+/*
+ * The kernel collects the number of events it couldn't send in a stretch and
+ * when possible sends this number in a PERF_RECORD_LOST event. The number of
+ * such "chunks" of lost events is stored in .nr_events[PERF_RECORD_LOST] while
+ * total_lost tells exactly how many events the kernel in fact lost, i.e. it is
+ * the sum of all struct lost_event.lost fields reported.
+ *
+ * The total_period is needed because by default auto-freq is used, so
+ * multiplying nr_events[PERF_RECORD_SAMPLE] by a frequency won't give the
+ * total number of low level events; it is necessary to sum all struct
+ * sample_event.period fields and stash the result in total_period.
+ */
struct events_stats {
- u64 total;
- u64 lost;
+ u64 total_period;
+ u64 total_lost;
u32 nr_events[PERF_RECORD_HEADER_MAX];
u32 nr_unknown_events;
};
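To make the chunks-versus-total distinction concrete, here is a minimal sketch (a hypothetical helper, not part of this patch; it assumes the struct events_stats above and <stdio.h>) of how a tool could report both counters together:

/* Hypothetical reporting helper, not part of this patch: shows how the two
 * lost counters relate. nr_events[PERF_RECORD_LOST] counts PERF_RECORD_LOST
 * records ("chunks"); total_lost is the sum of the events inside them. */
static void events_stats__warn_about_lost(struct events_stats *stats, FILE *fp)
{
	if (stats->total_lost == 0)
		return;

	fprintf(fp, "Warning: lost %Ld events in %u chunks "
		    "(%Ld sampled period kept)\n",
		stats->total_lost,
		stats->nr_events[PERF_RECORD_LOST],
		stats->total_period);
}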
}
snprintf(str, sizeof(str), "Samples: %Ld ",
- hists->stats.total);
+ hists->stats.total_period);
newtDrawRootText(0, 0, str);
newtGetScreenSize(NULL, &rows);
if (h->filtered)
continue;
- len = hist_entry__append_browser(h, self->tree, hists->stats.total);
+ len = hist_entry__append_browser(h, self->tree, hists->stats.total_period);
if (len > max_len)
max_len = len;
if (symbol_conf.use_callchain)
hist_entry__append_callchain_browser(h, self->tree,
- hists->stats.total, idx++);
+ hists->stats.total_period, idx++);
++curr_hist;
if (curr_hist % 5)
ui_progress__update(progress, curr_hist);
dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
offset + head, event->header.size,
event__name[event->header.type]);
- hists__inc_nr_events(self, event->header.type);
+ hists__inc_nr_events(&self->hists, event->header.type);
}
if (self->header.needs_swap && event__swap_ops[event->header.type])