perf: Fix sys_perf_event_open() race against self
author    Peter Zijlstra <peterz@infradead.org>
          Fri, 20 May 2022 18:38:06 +0000 (20:38 +0200)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 20 May 2022 18:44:00 +0000 (08:44 -1000)
Norbert reported that it's possible to race sys_perf_event_open() such
that the loser ends up in a different context from the group leader,
triggering many WARNs.
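
For reference, a rough userspace sketch of the kind of interleaving
involved; this is not Norbert's reproducer, the event types and the
assumption that the hardware sibling forces the move_group path are
illustrative only, and a real reproducer would loop since hitting the
window depends on timing:

	/* racer.c - illustrative sketch, not a verified reproducer; gcc -pthread racer.c */
	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <pthread.h>
	#include <string.h>
	#include <unistd.h>

	static int leader_fd;

	static int open_event(__u32 type, __u64 config, int group_fd)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = type;
		attr.config = config;
		attr.disabled = 1;

		/* pid == 0, cpu == -1: per-task event on the calling task */
		return syscall(__NR_perf_event_open, &attr, 0, -1, group_fd, 0);
	}

	/* Hardware sibling; adding it to a software-only group triggers move_group. */
	static void *add_hw_sibling(void *arg)
	{
		open_event(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES, leader_fd);
		return NULL;
	}

	/* Software sibling; takes the !move_group path. */
	static void *add_sw_sibling(void *arg)
	{
		open_event(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_TASK_CLOCK, leader_fd);
		return NULL;
	}

	int main(void)
	{
		pthread_t a, b;

		/* Software group leader, initially in the software task context. */
		leader_fd = open_event(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_CLOCK, -1);

		/* Race both opens against the same group leader. */
		pthread_create(&a, NULL, add_hw_sibling, NULL);
		pthread_create(&b, NULL, add_sw_sibling, NULL);
		pthread_join(a, NULL);
		pthread_join(b, NULL);
		return 0;
	}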

The move_group case checks for races against itself, but the
!move_group case doesn't, seemingly relying on the previous
group_leader->ctx == ctx check. However, that check is racy due to not
holding any locks at that time.

Therefore, re-check the result after acquiring the locks and bail if
they no longer match.
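
In condensed form (heavily simplified from kernel/events/core.c, with
error handling, the move_group locking and unrelated branches elided),
the window and the new check look like this:

	/* sys_perf_event_open(), !move_group path, simplified sketch */

	if (!move_group && group_leader->ctx != ctx)	/* unlocked, can pass... */
		goto err_context;

	/* ...a concurrent perf_event_open() can move group_leader to another ctx here... */

	mutex_lock(&ctx->mutex);

	/* ...so re-validate once ctx->mutex is held */
	if (group_leader && group_leader->ctx != ctx) {
		err = -EINVAL;
		goto err_locked;
	}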

Additionally, add a not_move_group label to clarify the path taken when
the move_group-vs-move_group race downgrades the loser to the
!move_group case.

Fixes: f63a8daa5812 ("perf: Fix event->ctx locking")
Reported-by: Norbert Slusarek <nslusarek@gmx.net>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 7858baf..7f1e4c5 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -12217,6 +12217,9 @@ SYSCALL_DEFINE5(perf_event_open,
                 * Do not allow to attach to a group in a different task
                 * or CPU context. If we're moving SW events, we'll fix
                 * this up later, so allow that.
+                *
+                * Racy, not holding group_leader->ctx->mutex, see comment with
+                * perf_event_ctx_lock().
                 */
                if (!move_group && group_leader->ctx != ctx)
                        goto err_context;
@@ -12282,6 +12285,7 @@ SYSCALL_DEFINE5(perf_event_open,
                        } else {
                                perf_event_ctx_unlock(group_leader, gctx);
                                move_group = 0;
+                               goto not_move_group;
                        }
                }
 
@@ -12298,7 +12302,17 @@ SYSCALL_DEFINE5(perf_event_open,
                }
        } else {
                mutex_lock(&ctx->mutex);
+
+               /*
+                * Now that we hold ctx->mutex, (re)validate group_leader->ctx == ctx,
+                * see the group_leader && !move_group test earlier.
+                */
+               if (group_leader && group_leader->ctx != ctx) {
+                       err = -EINVAL;
+                       goto err_locked;
+               }
        }
+not_move_group:
 
        if (ctx->task == TASK_TOMBSTONE) {
                err = -ESRCH;