static void open_counters(int cpu, pid_t pid)
{
- struct perf_counter_hw_event hw_event;
+ struct perf_counter_attr attr;
int counter, group_fd;
int track = 1;
group_fd = -1;
for (counter = 0; counter < nr_counters; counter++) {
- memset(&hw_event, 0, sizeof(hw_event));
- hw_event.config = event_id[counter];
- hw_event.irq_period = event_count[counter];
- hw_event.record_type = PERF_RECORD_IP | PERF_RECORD_TID;
- hw_event.mmap = track;
- hw_event.comm = track;
- hw_event.inherit = (cpu < 0) && inherit;
+ memset(&attr, 0, sizeof(attr));
+ attr.config = event_id[counter];
+ attr.sample_period = event_count[counter];
+ attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
+ attr.mmap = track;
+ attr.comm = track;
+ attr.inherit = (cpu < 0) && inherit;
track = 0; // only the first counter needs these
fd[nr_cpu][counter] =
- sys_perf_counter_open(&hw_event, pid, cpu, group_fd, 0);
+ sys_perf_counter_open(&attr, pid, cpu, group_fd, 0);
if (fd[nr_cpu][counter] < 0) {
int err = errno;
static void create_perfstat_counter(int counter)
{
- struct perf_counter_hw_event hw_event;
+ struct perf_counter_attr attr;
- memset(&hw_event, 0, sizeof(hw_event));
- hw_event.config = event_id[counter];
- hw_event.record_type = 0;
- hw_event.exclude_kernel = event_mask[counter] & EVENT_MASK_KERNEL;
- hw_event.exclude_user = event_mask[counter] & EVENT_MASK_USER;
+ memset(&attr, 0, sizeof(attr));
+ attr.config = event_id[counter];
+ attr.sample_type = 0;
+ attr.exclude_kernel = event_mask[counter] & EVENT_MASK_KERNEL;
+ attr.exclude_user = event_mask[counter] & EVENT_MASK_USER;
if (scale)
- hw_event.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
+ attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
PERF_FORMAT_TOTAL_TIME_RUNNING;
if (system_wide) {
int cpu;
for (cpu = 0; cpu < nr_cpus; cpu ++) {
- fd[cpu][counter] = sys_perf_counter_open(&hw_event, -1, cpu, -1, 0);
+ fd[cpu][counter] = sys_perf_counter_open(&attr, -1, cpu, -1, 0);
if (fd[cpu][counter] < 0) {
printf("perfstat error: syscall returned with %d (%s)\n",
fd[cpu][counter], strerror(errno));
}
}
} else {
- hw_event.inherit = inherit;
- hw_event.disabled = 1;
+ attr.inherit = inherit;
+ attr.disabled = 1;
- fd[0][counter] = sys_perf_counter_open(&hw_event, 0, -1, -1, 0);
+ fd[0][counter] = sys_perf_counter_open(&attr, 0, -1, -1, 0);
if (fd[0][counter] < 0) {
printf("perfstat error: syscall returned with %d (%s)\n",
fd[0][counter], strerror(errno));
old += size;
if (event->header.misc & PERF_EVENT_MISC_OVERFLOW) {
- if (event->header.type & PERF_RECORD_IP)
+ if (event->header.type & PERF_SAMPLE_IP)
process_event(event->ip.ip, md->counter);
} else {
switch (event->header.type) {
static int __cmd_top(void)
{
- struct perf_counter_hw_event hw_event;
+ struct perf_counter_attr attr;
pthread_t thread;
int i, counter, group_fd, nr_poll = 0;
unsigned int cpu;
if (target_pid == -1 && profile_cpu == -1)
cpu = i;
- memset(&hw_event, 0, sizeof(hw_event));
- hw_event.config = event_id[counter];
- hw_event.irq_period = event_count[counter];
- hw_event.record_type = PERF_RECORD_IP | PERF_RECORD_TID;
- hw_event.mmap = use_mmap;
- hw_event.munmap = use_munmap;
- hw_event.freq = freq;
+ memset(&attr, 0, sizeof(attr));
+ attr.config = event_id[counter];
+ attr.sample_period = event_count[counter];
+ attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
+ attr.mmap = use_mmap;
+ attr.munmap = use_munmap;
+ attr.freq = freq;
- fd[i][counter] = sys_perf_counter_open(&hw_event, target_pid, cpu, group_fd, 0);
+ fd[i][counter] = sys_perf_counter_open(&attr, target_pid, cpu, group_fd, 0);
if (fd[i][counter] < 0) {
int err = errno;
printf("kerneltop error: syscall returned with %d (%s)\n",
_min1 < _min2 ? _min1 : _min2; })
/*
 * Thin wrapper around the perf_counter_open system call.
 *
 * attr_uptr: counter configuration (struct perf_counter_attr), passed
 *            through to the kernel unmodified.
 * pid, cpu:  which task and/or CPU to attach the counter to; callers in
 *            this file pass -1 for "any" — exact semantics are defined
 *            by the kernel ABI, not here (NOTE(review): confirm against
 *            the perf_counter syscall documentation).
 * group_fd:  fd of the counter-group leader, or -1 to start a new group
 *            (callers above use both forms).
 * flags:     passed straight through; callers here always pass 0.
 *
 * Returns the new counter fd on success, or -1 with errno set on
 * failure (plain syscall(2) convention — callers check < 0 and read
 * errno/strerror).
 */
static inline int
sys_perf_counter_open(struct perf_counter_attr *attr_uptr,
		      pid_t pid, int cpu, int group_fd,
		      unsigned long flags)
{
	return syscall(__NR_perf_counter_open, attr_uptr, pid, cpu,
			group_fd, flags);
}