sched: Allow sched_core_put() from atomic context
author Peter Zijlstra <peterz@infradead.org>
Mon, 29 Mar 2021 08:08:58 +0000 (10:08 +0200)
committer Peter Zijlstra <peterz@infradead.org>
Wed, 12 May 2021 09:43:27 +0000 (11:43 +0200)
Stuff the meat of sched_core_put() into a work such that we can use
sched_core_put() from atomic context.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Don Hiatt <dhiatt@digitalocean.com>
Tested-by: Hongyu Ning <hongyu.ning@linux.intel.com>
Tested-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/20210422123308.377455632@infradead.org
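
With this change, sched_core_put() itself only does an atomic decrement and, for the final reference, queues a work item; it no longer takes sched_core_mutex directly, so it may be called where sleeping is not allowed. A minimal sketch of such a caller follows; the example_lock and example_drop_core_ref() names are hypothetical and not part of the patch, and the extern declarations are included only to keep the sketch self-contained.

    #include <linux/spinlock.h>

    /* Declared here only for the sketch; in-tree the prototypes come from the scheduler headers. */
    extern void sched_core_get(void);
    extern void sched_core_put(void);

    static DEFINE_RAW_SPINLOCK(example_lock);

    /* Hypothetical caller: drops its core-sched reference from atomic context. */
    static void example_drop_core_ref(void)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&example_lock, flags);
            /* Sleeping is forbidden here; taking sched_core_mutex would be a bug. */
            sched_core_put();       /* now just atomic ops plus schedule_work() */
            raw_spin_unlock_irqrestore(&example_lock, flags);
    }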
kernel/sched/core.c

index 42c1c88..85147be 100644
@@ -102,7 +102,7 @@ DEFINE_STATIC_KEY_FALSE(__sched_core_enabled);
  */
 
 static DEFINE_MUTEX(sched_core_mutex);
-static int sched_core_count;
+static atomic_t sched_core_count;
 static struct cpumask sched_core_mask;
 
 static void __sched_core_flip(bool enabled)
@@ -170,18 +170,39 @@ static void __sched_core_disable(void)
 
 void sched_core_get(void)
 {
+       if (atomic_inc_not_zero(&sched_core_count))
+               return;
+
        mutex_lock(&sched_core_mutex);
-       if (!sched_core_count++)
+       if (!atomic_read(&sched_core_count))
                __sched_core_enable();
+
+       smp_mb__before_atomic();
+       atomic_inc(&sched_core_count);
        mutex_unlock(&sched_core_mutex);
 }
 
-void sched_core_put(void)
+static void __sched_core_put(struct work_struct *work)
 {
-       mutex_lock(&sched_core_mutex);
-       if (!--sched_core_count)
+       if (atomic_dec_and_mutex_lock(&sched_core_count, &sched_core_mutex)) {
                __sched_core_disable();
-       mutex_unlock(&sched_core_mutex);
+               mutex_unlock(&sched_core_mutex);
+       }
+}
+
+void sched_core_put(void)
+{
+       static DECLARE_WORK(_work, __sched_core_put);
+
+       /*
+        * "There can be only one"
+        *
+        * Either this is the last one, or we don't actually need to do any
+        * 'work'. If it is the last *again*, we rely on
+        * WORK_STRUCT_PENDING_BIT.
+        */
+       if (!atomic_add_unless(&sched_core_count, -1, 1))
+               schedule_work(&_work);
 }
 
 #endif /* CONFIG_SCHED_CORE */
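
The "There can be only one" comment leans on a workqueue property: schedule_work() on an already-pending work item is a no-op, because WORK_STRUCT_PENDING_BIT stays set until the work is about to execute. So if the count drops to the last reference again before __sched_core_put() has run, no second work gets queued. A hedged, self-contained sketch of that property (the demo_* names are hypothetical):

    #include <linux/workqueue.h>
    #include <linux/printk.h>

    static void demo_fn(struct work_struct *work)
    {
            pr_info("demo work ran\n");
    }

    static DECLARE_WORK(demo_work, demo_fn);

    static void demo_double_schedule(void)
    {
            schedule_work(&demo_work);      /* queues the work, returns true */
            schedule_work(&demo_work);      /* still pending: returns false, no-op */
    }

On the get side, the smp_mb__before_atomic() orders __sched_core_enable()'s stores before the count becomes non-zero, so a racing sched_core_get() whose atomic_inc_not_zero() fast path succeeds observes core scheduling already enabled.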