sched: Simplify sched_core_cpu_{starting,deactivate}()
Author:     Peter Zijlstra <peterz@infradead.org>
AuthorDate: Tue, 1 Aug 2023 20:41:30 +0000 (22:41 +0200)
Commit:     Peter Zijlstra <peterz@infradead.org>
CommitDate: Mon, 14 Aug 2023 15:01:27 +0000 (17:01 +0200)
Use guards to remove the unlock gotos and simplify control flow; a sketch of what the guard machinery generates follows below.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <vschneid@redhat.com>
Link: https://lore.kernel.org/r/20230801211812.371787909@infradead.org
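
The guard(core_lock)(&cpu) used below relies on DEFINE_LOCK_GUARD_1() from include/linux/cleanup.h. Roughly, the macro generates a small "class" around the lock: a struct carrying the lock argument plus the extra members, a constructor that runs the lock expression, and a destructor that runs the unlock expression. A simplified sketch of the expansion for core_lock; the identifier names are illustrative, the real macros use uniquified names:

/*
 * Simplified sketch of what DEFINE_LOCK_GUARD_1(core_lock, ...) below
 * roughly generates (see include/linux/cleanup.h for the real thing).
 */
typedef struct {
	int *lock;		/* &cpu, as passed to guard(core_lock)(...) */
	unsigned long flags;	/* the extra member from the last argument */
} class_core_lock_t;

static inline class_core_lock_t class_core_lock_constructor(int *l)
{
	class_core_lock_t _t = { .lock = l }, *_T = &_t;

	sched_core_lock(*_T->lock, &_T->flags);		/* the "lock" expression */
	return _t;
}

static inline void class_core_lock_destructor(class_core_lock_t *_T)
{
	sched_core_unlock(*_T->lock, &_T->flags);	/* the "unlock" expression */
}

/*
 * guard(core_lock)(&cpu) then declares a scope-local variable roughly like:
 *
 *	class_core_lock_t g __attribute__((cleanup(class_core_lock_destructor))) =
 *		class_core_lock_constructor(&cpu);
 */

Because the destructor runs on every scope exit, the early returns that replace the gotos cannot leak the lock.
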
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f113a44..efe3848 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6400,20 +6400,24 @@ static void queue_core_balance(struct rq *rq)
        queue_balance_callback(rq, &per_cpu(core_balance_head, rq->cpu), sched_core_balance);
 }
 
+DEFINE_LOCK_GUARD_1(core_lock, int,
+                   sched_core_lock(*_T->lock, &_T->flags),
+                   sched_core_unlock(*_T->lock, &_T->flags),
+                   unsigned long flags)
+
 static void sched_core_cpu_starting(unsigned int cpu)
 {
        const struct cpumask *smt_mask = cpu_smt_mask(cpu);
        struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
-       unsigned long flags;
        int t;
 
-       sched_core_lock(cpu, &flags);
+       guard(core_lock)(&cpu);
 
        WARN_ON_ONCE(rq->core != rq);
 
        /* if we're the first, we'll be our own leader */
        if (cpumask_weight(smt_mask) == 1)
-               goto unlock;
+               return;
 
        /* find the leader */
        for_each_cpu(t, smt_mask) {
@@ -6427,7 +6431,7 @@ static void sched_core_cpu_starting(unsigned int cpu)
        }
 
        if (WARN_ON_ONCE(!core_rq)) /* whoopsie */
-               goto unlock;
+               return;
 
        /* install and validate core_rq */
        for_each_cpu(t, smt_mask) {
@@ -6438,29 +6442,25 @@ static void sched_core_cpu_starting(unsigned int cpu)
 
                WARN_ON_ONCE(rq->core != core_rq);
        }
-
-unlock:
-       sched_core_unlock(cpu, &flags);
 }
 
 static void sched_core_cpu_deactivate(unsigned int cpu)
 {
        const struct cpumask *smt_mask = cpu_smt_mask(cpu);
        struct rq *rq = cpu_rq(cpu), *core_rq = NULL;
-       unsigned long flags;
        int t;
 
-       sched_core_lock(cpu, &flags);
+       guard(core_lock)(&cpu);
 
        /* if we're the last man standing, nothing to do */
        if (cpumask_weight(smt_mask) == 1) {
                WARN_ON_ONCE(rq->core != rq);
-               goto unlock;
+               return;
        }
 
        /* if we're not the leader, nothing to do */
        if (rq->core != rq)
-               goto unlock;
+               return;
 
        /* find a new leader */
        for_each_cpu(t, smt_mask) {
@@ -6471,7 +6471,7 @@ static void sched_core_cpu_deactivate(unsigned int cpu)
        }
 
        if (WARN_ON_ONCE(!core_rq)) /* impossible */
-               goto unlock;
+               return;
 
        /* copy the shared state to the new leader */
        core_rq->core_task_seq             = rq->core_task_seq;
@@ -6493,9 +6493,6 @@ static void sched_core_cpu_deactivate(unsigned int cpu)
                rq = cpu_rq(t);
                rq->core = core_rq;
        }
-
-unlock:
-       sched_core_unlock(cpu, &flags);
 }
 
 static inline void sched_core_cpu_dying(unsigned int cpu)
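
The same pattern can be reproduced outside the kernel with the compiler's cleanup attribute (supported by GCC and Clang). A minimal, hypothetical userspace analogue using pthreads; all names here are invented for illustration:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

typedef struct { pthread_mutex_t *lock; } mutex_guard_t;

static inline mutex_guard_t mutex_guard_ctor(pthread_mutex_t *l)
{
	pthread_mutex_lock(l);
	return (mutex_guard_t){ .lock = l };
}

static inline void mutex_guard_dtor(mutex_guard_t *g)
{
	pthread_mutex_unlock(g->lock);
}

/* One guard per scope in this demo, so a fixed variable name suffices. */
#define guard_mutex(l) \
	mutex_guard_t __attribute__((cleanup(mutex_guard_dtor))) \
		guard_var = mutex_guard_ctor(l)

static int f(int x)
{
	guard_mutex(&m);

	if (x < 0)
		return -1;	/* dtor unlocks here ... */

	printf("x=%d\n", x);
	return 0;		/* ... and here */
}

int main(void)
{
	f(1);
	f(-1);
	return 0;
}

As with guard(core_lock), the unlock is tied to the scope rather than to explicit call sites, so adding or reordering early returns can no longer introduce a missed-unlock bug.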