Merge tag 'perf_urgent_for_v5.19_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
author Linus Torvalds <torvalds@linux-foundation.org>
Sun, 17 Jul 2022 15:34:02 +0000 (08:34 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sun, 17 Jul 2022 15:34:02 +0000 (08:34 -0700)
Pull perf fix from Borislav Petkov:

 - A single fix for a data race on the perf event cleanup path that
   could cause endless loops due to insufficient locking

* tag 'perf_urgent_for_v5.19_rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/core: Fix data race between perf_event_set_output() and perf_mmap_close()
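
For context: perf_event_set_output() is reached from userspace through the
PERF_EVENT_IOC_SET_OUTPUT ioctl (or output redirection at perf_event_open()
time), while perf_mmap_close() runs when a ring-buffer mapping is torn down.
A minimal userspace sketch of that path follows; it assumes 4 KiB pages and
available software events, and the attribute values are illustrative only:

#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	size_t len = 9 * 4096;		/* 1 metadata page + 8 data pages */
	int fd1, fd2;
	void *buf;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_IP;

	fd1 = perf_event_open(&attr, 0, -1, -1, 0);
	fd2 = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd1 < 0 || fd2 < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* Give fd1 a ring buffer ... */
	buf = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* ... and redirect fd2's output into it. This call ends up in
	 * perf_event_set_output(), the function changed below. */
	if (ioctl(fd2, PERF_EVENT_IOC_SET_OUTPUT, fd1))
		perror("PERF_EVENT_IOC_SET_OUTPUT");

	munmap(buf, len);	/* tears the buffer down via perf_mmap_close() */
	close(fd1);
	close(fd2);
	return 0;
}

Running a redirection like this concurrently with munmap() on the target
buffer is the shape of the race the patch below closes.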

kernel/events/core.c

index 80782cd..d2b3549 100644
@@ -6253,10 +6253,10 @@ again:
 
                if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
                        /*
-                        * Raced against perf_mmap_close() through
-                        * perf_event_set_output(). Try again, hope for better
-                        * luck.
+                        * Raced against perf_mmap_close(); remove the
+                        * event and try again.
                         */
+                       ring_buffer_attach(event, NULL);
                        mutex_unlock(&event->mmap_mutex);
                        goto again;
                }
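
The failure mode hinges on the semantics of atomic_inc_not_zero(): it takes
a reference only if the count has not already dropped to zero. A userspace
sketch of that primitive in C11 atomics (the kernel's implementation differs
in detail; this only shows the semantics):

#include <stdatomic.h>
#include <stdbool.h>

/* Increment *v unless it is zero; return true iff a reference was taken. */
static bool inc_not_zero(atomic_int *v)
{
	int old = atomic_load(v);

	while (old != 0) {
		if (atomic_compare_exchange_weak(v, &old, old + 1))
			return true;
		/* CAS failed: 'old' now holds the current value; retry. */
	}
	return false;	/* count already hit zero: buffer is being torn down */
}

When this fails in perf_mmap(), perf_mmap_close() is already tearing the
buffer down. Before this change, retrying without detaching could observe
the same dead buffer on every iteration and loop forever; detaching with
ring_buffer_attach(event, NULL) first guarantees forward progress.
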
@@ -11825,14 +11825,25 @@ err_size:
        goto out;
 }
 
+static void mutex_lock_double(struct mutex *a, struct mutex *b)
+{
+       if (b < a)
+               swap(a, b);
+
+       mutex_lock(a);
+       mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
+}
+
 static int
 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
 {
        struct perf_buffer *rb = NULL;
        int ret = -EINVAL;
 
-       if (!output_event)
+       if (!output_event) {
+               mutex_lock(&event->mmap_mutex);
                goto set;
+       }
 
        /* don't allow circular references */
        if (event == output_event)
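
mutex_lock_double(), moved up here from later in the file, prevents an ABBA
deadlock between two tasks locking the same pair of mutexes in opposite
argument order, by always acquiring them in ascending address order. A
minimal standalone sketch of the idea with POSIX mutexes (illustrative
names, not kernel API):

#include <pthread.h>
#include <stdint.h>

/* Acquire two mutexes in a globally consistent (address) order, so two
 * threads calling lock_double(x, y) and lock_double(y, x) cannot deadlock. */
static void lock_double(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if ((uintptr_t)b < (uintptr_t)a) {
		pthread_mutex_t *tmp = a;

		a = b;
		b = tmp;
	}
	pthread_mutex_lock(a);
	pthread_mutex_lock(b);
}

The kernel version additionally takes the second lock with
mutex_lock_nested(..., SINGLE_DEPTH_NESTING) so lockdep accepts holding two
locks of the same lock class at once.
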
@@ -11870,8 +11881,15 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
            event->pmu != output_event->pmu)
                goto out;
 
+       /*
+        * Hold both mmap_mutex to serialize against perf_mmap_close().  Since
+        * output_event is already on rb->event_list, and the list iteration
+        * restarts after every removal, it is guaranteed this new event is
+        * observed *OR* if output_event is already removed, it's guaranteed we
+        * observe !rb->mmap_count.
+        */
+       mutex_lock_double(&event->mmap_mutex, &output_event->mmap_mutex);
 set:
-       mutex_lock(&event->mmap_mutex);
        /* Can't redirect output if we've got an active mmap() */
        if (atomic_read(&event->mmap_count))
                goto unlock;
@@ -11881,6 +11899,12 @@ set:
                rb = ring_buffer_get(output_event);
                if (!rb)
                        goto unlock;
+
+               /* did we race against perf_mmap_close() */
+               if (!atomic_read(&rb->mmap_count)) {
+                       ring_buffer_put(rb);
+                       goto unlock;
+               }
        }
 
        ring_buffer_attach(event, rb);
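
The rb->mmap_count recheck is the usual "take a reference, then revalidate
under the lock" pattern: ring_buffer_get() alone only pins the buffer's
memory, it does not prove the buffer is still mapped. A compressed sketch
with hypothetical names (struct buffer, get_live_buffer() and put_buffer()
are illustrative, not kernel API):

#include <stdatomic.h>
#include <stddef.h>

struct buffer {
	atomic_int refcount;	/* object lifetime */
	atomic_int mmap_count;	/* nonzero while the buffer is still mapped */
};

static void put_buffer(struct buffer *b)
{
	atomic_fetch_sub(&b->refcount, 1);	/* real code frees at zero */
}

static struct buffer *get_live_buffer(struct buffer *b)
{
	atomic_fetch_add(&b->refcount, 1);	/* take a reference ... */
	if (atomic_load(&b->mmap_count) == 0) {	/* ... then revalidate */
		put_buffer(b);			/* raced with unmap: back off */
		return NULL;
	}
	return b;
}

Here both steps happen with event->mmap_mutex (and output_event->mmap_mutex)
held, so a concurrent perf_mmap_close() can no longer slip in between them.
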
@@ -11888,20 +11912,13 @@ set:
        ret = 0;
 unlock:
        mutex_unlock(&event->mmap_mutex);
+       if (output_event)
+               mutex_unlock(&output_event->mmap_mutex);
 
 out:
        return ret;
 }
 
-static void mutex_lock_double(struct mutex *a, struct mutex *b)
-{
-       if (b < a)
-               swap(a, b);
-
-       mutex_lock(a);
-       mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
-}
-
 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
 {
        bool nmi_safe = false;