workqueue: Factorize unbind/rebind_workers() logic
author    Valentin Schneider <vschneid@redhat.com>  Thu, 12 Jan 2023 16:14:28 +0000 (16:14 +0000)
committer Tejun Heo <tj@kernel.org>  Thu, 12 Jan 2023 16:21:49 +0000 (06:21 -1000)
Later patches will reuse this code, so move it into reusable functions.

Signed-off-by: Valentin Schneider <vschneid@redhat.com>
Reviewed-by: Lai Jiangshan <jiangshanlai@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
kernel/workqueue.c

index 55cca6c..f1386be 100644
@@ -1976,6 +1976,23 @@ fail:
        return NULL;
 }
 
+static void unbind_worker(struct worker *worker)
+{
+       lockdep_assert_held(&wq_pool_attach_mutex);
+
+       kthread_set_per_cpu(worker->task, -1);
+       if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask))
+               WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0);
+       else
+               WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
+}
+
+static void rebind_worker(struct worker *worker, struct worker_pool *pool)
+{
+       kthread_set_per_cpu(worker->task, pool->cpu);
+       WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask) < 0);
+}
+
 /**
  * destroy_worker - destroy a workqueue worker
  * @worker: worker to be destroyed
@@ -5051,13 +5068,8 @@ static void unbind_workers(int cpu)
 
                raw_spin_unlock_irq(&pool->lock);
 
-               for_each_pool_worker(worker, pool) {
-                       kthread_set_per_cpu(worker->task, -1);
-                       if (cpumask_intersects(wq_unbound_cpumask, cpu_active_mask))
-                               WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0);
-                       else
-                               WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
-               }
+               for_each_pool_worker(worker, pool)
+                       unbind_worker(worker);
 
                mutex_unlock(&wq_pool_attach_mutex);
        }
@@ -5082,11 +5094,8 @@ static void rebind_workers(struct worker_pool *pool)
         * of all workers first and then clear UNBOUND.  As we're called
         * from CPU_ONLINE, the following shouldn't fail.
         */
-       for_each_pool_worker(worker, pool) {
-               kthread_set_per_cpu(worker->task, pool->cpu);
-               WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
-                                                 pool->attrs->cpumask) < 0);
-       }
+       for_each_pool_worker(worker, pool)
+               rebind_worker(worker, pool);
 
        raw_spin_lock_irq(&pool->lock);
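
The new helpers are drop-in replacements for the open-coded loops above. As a rough illustration of the kind of reuse the changelog hints at (a hypothetical caller sketched here for clarity, not part of this patch or any later one; the function name is made up), a detaching path could do:

	/*
	 * Hypothetical example of reusing unbind_worker(); not in workqueue.c.
	 * wq_pool_attach_mutex must be held, which also satisfies the
	 * lockdep_assert_held() inside unbind_worker() and the lockdep
	 * check in for_each_pool_worker().
	 */
	static void example_unbind_pool(struct worker_pool *pool)
	{
		struct worker *worker;

		mutex_lock(&wq_pool_attach_mutex);
		for_each_pool_worker(worker, pool)
			unbind_worker(worker);
		mutex_unlock(&wq_pool_attach_mutex);
	}

Keeping the lockdep_assert_held() inside unbind_worker() means every future caller is checked against wq_pool_attach_mutex, while rebind_worker() takes the pool explicitly because the target CPU and cpumask live in pool->cpu and pool->attrs->cpumask.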