async: Split async_schedule_node_domain()
author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Wed, 27 Dec 2023 20:37:02 +0000 (21:37 +0100)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 1 Feb 2024 00:18:49 +0000 (16:18 -0800)
commit 6aa09a5bccd8e224d917afdb4c278fc66aacde4d upstream.

In preparation for subsequent changes, split async_schedule_node_domain()
in two pieces so as to allow the bottom part of it to be called from a
somewhat different code path.

No functional impact.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Reviewed-by: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>
Tested-by: Youngmin Nam <youngmin.nam@samsung.com>
Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
kernel/async.c

index b2c4ba5..cffe6b4 100644 (file)
@@ -145,6 +145,39 @@ static void async_run_entry_fn(struct work_struct *work)
        wake_up(&async_done);
 }
 
+static async_cookie_t __async_schedule_node_domain(async_func_t func,
+                                                  void *data, int node,
+                                                  struct async_domain *domain,
+                                                  struct async_entry *entry)
+{
+       async_cookie_t newcookie;
+       unsigned long flags;
+
+       INIT_LIST_HEAD(&entry->domain_list);
+       INIT_LIST_HEAD(&entry->global_list);
+       INIT_WORK(&entry->work, async_run_entry_fn);
+       entry->func = func;
+       entry->data = data;
+       entry->domain = domain;
+
+       spin_lock_irqsave(&async_lock, flags);
+
+       /* allocate cookie and queue */
+       newcookie = entry->cookie = next_cookie++;
+
+       list_add_tail(&entry->domain_list, &domain->pending);
+       if (domain->registered)
+               list_add_tail(&entry->global_list, &async_global_pending);
+
+       atomic_inc(&entry_count);
+       spin_unlock_irqrestore(&async_lock, flags);
+
+       /* schedule for execution */
+       queue_work_node(node, system_unbound_wq, &entry->work);
+
+       return newcookie;
+}
+
 /**
  * async_schedule_node_domain - NUMA specific version of async_schedule_domain
  * @func: function to execute asynchronously
@@ -186,29 +219,8 @@ async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
                func(data, newcookie);
                return newcookie;
        }
-       INIT_LIST_HEAD(&entry->domain_list);
-       INIT_LIST_HEAD(&entry->global_list);
-       INIT_WORK(&entry->work, async_run_entry_fn);
-       entry->func = func;
-       entry->data = data;
-       entry->domain = domain;
-
-       spin_lock_irqsave(&async_lock, flags);
 
-       /* allocate cookie and queue */
-       newcookie = entry->cookie = next_cookie++;
-
-       list_add_tail(&entry->domain_list, &domain->pending);
-       if (domain->registered)
-               list_add_tail(&entry->global_list, &async_global_pending);
-
-       atomic_inc(&entry_count);
-       spin_unlock_irqrestore(&async_lock, flags);
-
-       /* schedule for execution */
-       queue_work_node(node, system_unbound_wq, &entry->work);
-
-       return newcookie;
+       return __async_schedule_node_domain(func, data, node, domain, entry);
 }
 EXPORT_SYMBOL_GPL(async_schedule_node_domain);