return 0;
}
+/*
+ * Read pending events from each of the given readers.
+ *
+ * Unlike perf_reader_poll(), this does not wait on the fds at all: every
+ * reader is drained unconditionally via perf_reader_event_read(). This
+ * picks up "remainder" events left in the ring buffer when
+ * wakeup_events > 1 is used (see perf_buffer_consume in the Python API).
+ *
+ * Always returns 0.
+ */
+int perf_reader_consume(int num_readers, struct perf_reader **readers) {
+ int i;
+ for (i = 0; i < num_readers; ++i) {
+ perf_reader_event_read(readers[i]);
+ }
+ return 0;
+}
+
/* Store the perf event file descriptor on the reader. */
void perf_reader_set_fd(struct perf_reader *reader, int fd) {
reader->fd = fd;
}
int perf_reader_mmap(struct perf_reader *reader);
void perf_reader_event_read(struct perf_reader *reader);
int perf_reader_poll(int num_readers, struct perf_reader **readers, int timeout);
+/* Drain pending events from every reader without waiting; always returns 0. */
+int perf_reader_consume(int num_readers, struct perf_reader **readers);
int perf_reader_fd(struct perf_reader *reader);
void perf_reader_set_fd(struct perf_reader *reader, int fd);
readers[i] = v
lib.perf_reader_poll(len(readers), readers, timeout)
+ def perf_buffer_consume(self):
+ """perf_buffer_consume(self)
+
+ Consume all open perf buffers, regardless of whether or not
+ they currently contain event data. Necessary to catch 'remainder'
+ events when wakeup_events > 1 is set in open_perf_buffer.
+ """
+ # Pack the raw reader handles into a ctypes array and hand it to the
+ # C helper, which reads each buffer unconditionally (no poll/wait).
+ readers = (ct.c_void_p * len(self.perf_buffers))()
+ for i, v in enumerate(self.perf_buffers.values()):
+ readers[i] = v
+ lib.perf_reader_consume(len(readers), readers)
+
def kprobe_poll(self, timeout = -1):
"""kprobe_poll(self)
lib.bpf_open_perf_event.argtypes = [ct.c_uint, ct.c_ulonglong, ct.c_int, ct.c_int]
lib.perf_reader_poll.restype = ct.c_int
lib.perf_reader_poll.argtypes = [ct.c_int, ct.POINTER(ct.c_void_p), ct.c_int]
+# Mirrors the C prototype: int perf_reader_consume(int, struct perf_reader **).
+lib.perf_reader_consume.restype = ct.c_int
+lib.perf_reader_consume.argtypes = [ct.c_int, ct.POINTER(ct.c_void_p)]
lib.perf_reader_free.restype = None
lib.perf_reader_free.argtypes = [ct.c_void_p]
lib.perf_reader_fd.restype = int