for (i = 0; i < evlist->nr_mmaps; i++) {
map[i].core.fd = -1;
- map[i].overwrite = overwrite;
+ map[i].core.overwrite = overwrite;
/*
* When the perf_mmap() call is made we grab one refcount, plus
* one extra to let perf_mmap__consume() get the last
return NULL;
/* non-overwrite doesn't pause the ringbuffer */
- if (!map->overwrite)
+ if (!map->core.overwrite)
map->core.end = perf_mmap__read_head(map);
event = perf_mmap__read(map, &map->core.start, map->core.end);
- if (!map->overwrite)
+ if (!map->core.overwrite)
map->core.prev = map->core.start;
return event;
void perf_mmap__consume(struct mmap *map)
{
- if (!map->overwrite) {
+ if (!map->core.overwrite) {
u64 old = map->core.prev;
perf_mmap__write_tail(map, old);
unsigned char *data = md->core.base + page_size;
unsigned long size;
- md->core.start = md->overwrite ? head : old;
- md->core.end = md->overwrite ? old : head;
+ md->core.start = md->core.overwrite ? head : old;
+ md->core.end = md->core.overwrite ? old : head;
if ((md->core.end - md->core.start) < md->flush)
return -EAGAIN;
size = md->core.end - md->core.start;
if (size > (unsigned long)(md->core.mask) + 1) {
- if (!md->overwrite) {
+ if (!md->core.overwrite) {
WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
md->core.prev = head;