perf/core: fix userpage->time_enabled of inactive events
author Song Liu <songliubraving@fb.com>
Wed, 29 Sep 2021 19:43:13 +0000 (12:43 -0700)
committer Peter Zijlstra <peterz@infradead.org>
Fri, 1 Oct 2021 11:57:54 +0000 (13:57 +0200)
Users of rdpmc rely on the mmapped user page to calculate accurate
time_enabled. Currently, userpage->time_enabled is only updated when the
event is added to the PMU. As a result, an inactive event (due to counter
multiplexing) does not have an accurate userpage->time_enabled. This can
be reproduced with something like:

   /* open 20 task perf_events "cycles" to create multiplexing */

   fd = perf_event_open();  /* open one more task perf_event "cycles" */
   userpage = mmap(fd);     /* use mmap and rdpmc */

   while (true) {
     time_enabled_mmap = xxx; /* use logic in perf_event_mmap_page */
     time_enabled_read = read(fd).time_enabled;
     if (time_enabled_mmap > time_enabled_read)
         BUG();
   }
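
For reference, below is a minimal userspace sketch of this check (x86 only),
assuming the seqlock and TSC-scaling algorithm documented in
include/uapi/linux/perf_event.h; open_cycles_event() and
mmap_read_time_enabled() are illustrative helpers, not part of this patch:

/*
 * Sketch only: open enough task "cycles" events to force multiplexing,
 * then verify that time_enabled derived from the mmapped user page never
 * runs ahead of the value returned by read().
 */
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <x86intrin.h>		/* __rdtsc() */

static int open_cycles_event(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED;

	/* pid == 0, cpu == -1: count this task on any CPU */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}

/*
 * Derive time_enabled from the user page, following the seqlock loop and
 * TSC scaling documented in include/uapi/linux/perf_event.h.
 */
static uint64_t mmap_read_time_enabled(struct perf_event_mmap_page *pc)
{
	uint64_t enabled, running, cyc = 0, time_offset = 0;
	uint32_t seq, time_mult = 0, time_shift = 0;

	do {
		seq = pc->lock;
		__sync_synchronize();

		enabled = pc->time_enabled;
		running = pc->time_running;

		if (pc->cap_user_time && enabled != running) {
			cyc = __rdtsc();
			time_offset = pc->time_offset;
			time_mult = pc->time_mult;
			time_shift = pc->time_shift;
		}

		__sync_synchronize();
	} while (pc->lock != seq);

	/* Extend time_enabled from its last update to "now". */
	if (pc->cap_user_time && enabled != running) {
		uint64_t quot = cyc >> time_shift;
		uint64_t rem = cyc & (((uint64_t)1 << time_shift) - 1);

		enabled += time_offset + quot * time_mult +
			   ((rem * time_mult) >> time_shift);
	}

	return enabled;
}

int main(void)
{
	struct perf_event_mmap_page *pc;
	uint64_t buf[2];	/* { value, time_enabled } */
	int fd, i;

	/* 20 extra "cycles" events on one task should exceed the PMCs. */
	for (i = 0; i < 20; i++)
		if (open_cycles_event() < 0) {
			perror("perf_event_open");
			return 1;
		}

	fd = open_cycles_event();
	pc = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ, MAP_SHARED, fd, 0);
	if (pc == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	for (;;) {
		uint64_t t_mmap = mmap_read_time_enabled(pc);

		if (read(fd, buf, sizeof(buf)) != sizeof(buf))
			break;
		/* read() runs after the mmap read, so it must not lag it. */
		if (t_mmap > buf[1])
			abort();	/* the BUG() from the reproducer */
	}

	return 0;
}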

Fix this by updating the userpage for inactive events in merge_sched_in().

Suggested-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reported-and-tested-by: Lucian Grijincu <lucian@fb.com>
Signed-off-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20210929194313.2398474-1-songliubraving@fb.com
include/linux/perf_event.h
kernel/events/core.c

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index fe156a8..9b60bb8 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -683,7 +683,9 @@ struct perf_event {
        /*
         * timestamp shadows the actual context timing but it can
         * be safely used in NMI interrupt context. It reflects the
-        * context time as it was when the event was last scheduled in.
+        * context time as it was when the event was last scheduled in,
+        * or when ctx_sched_in failed to schedule the event because we
+        * ran out of PMCs.
         *
         * ctx_time already accounts for ctx->timestamp. Therefore to
         * compute ctx_time for a sample, simply add perf_clock().
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 0c000cb..f23ca26 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3707,6 +3707,29 @@ static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx,
        return 0;
 }
 
+static inline bool event_update_userpage(struct perf_event *event)
+{
+       if (likely(!atomic_read(&event->mmap_count)))
+               return false;
+
+       perf_event_update_time(event);
+       perf_set_shadow_time(event, event->ctx);
+       perf_event_update_userpage(event);
+
+       return true;
+}
+
+static inline void group_update_userpage(struct perf_event *group_event)
+{
+       struct perf_event *event;
+
+       if (!event_update_userpage(group_event))
+               return;
+
+       for_each_sibling_event(event, group_event)
+               event_update_userpage(event);
+}
+
 static int merge_sched_in(struct perf_event *event, void *data)
 {
        struct perf_event_context *ctx = event->ctx;
@@ -3725,14 +3748,15 @@ static int merge_sched_in(struct perf_event *event, void *data)
        }
 
        if (event->state == PERF_EVENT_STATE_INACTIVE) {
+               *can_add_hw = 0;
                if (event->attr.pinned) {
                        perf_cgroup_event_disable(event, ctx);
                        perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
+               } else {
+                       ctx->rotate_necessary = 1;
+                       perf_mux_hrtimer_restart(cpuctx);
+                       group_update_userpage(event);
                }
-
-               *can_add_hw = 0;
-               ctx->rotate_necessary = 1;
-               perf_mux_hrtimer_restart(cpuctx);
        }
 
        return 0;
@@ -6324,6 +6348,8 @@ accounting:
 
                ring_buffer_attach(event, rb);
 
+               perf_event_update_time(event);
+               perf_set_shadow_time(event, event->ctx);
                perf_event_init_userpage(event);
                perf_event_update_userpage(event);
        } else {