async: Split async_schedule_node_domain() and introduce async_schedule_dev_nocall()
[platform/kernel/linux-rpi.git] / kernel / async.c
index b2c4ba5..673bba6 100644 (file)
@@ -145,6 +145,39 @@ static void async_run_entry_fn(struct work_struct *work)
        wake_up(&async_done);
 }
 
+static async_cookie_t __async_schedule_node_domain(async_func_t func,
+                                                  void *data, int node,
+                                                  struct async_domain *domain,
+                                                  struct async_entry *entry)
+{
+       async_cookie_t newcookie;
+       unsigned long flags;
+
+       INIT_LIST_HEAD(&entry->domain_list);
+       INIT_LIST_HEAD(&entry->global_list);
+       INIT_WORK(&entry->work, async_run_entry_fn);
+       entry->func = func;
+       entry->data = data;
+       entry->domain = domain;
+
+       spin_lock_irqsave(&async_lock, flags);
+
+       /* allocate cookie and queue */
+       newcookie = entry->cookie = next_cookie++;
+
+       list_add_tail(&entry->domain_list, &domain->pending);
+       if (domain->registered)
+               list_add_tail(&entry->global_list, &async_global_pending);
+
+       atomic_inc(&entry_count);
+       spin_unlock_irqrestore(&async_lock, flags);
+
+       /* schedule for execution */
+       queue_work_node(node, system_unbound_wq, &entry->work);
+
+       return newcookie;
+}
+
 /**
  * async_schedule_node_domain - NUMA specific version of async_schedule_domain
  * @func: function to execute asynchronously
@@ -186,29 +219,8 @@ async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
                func(data, newcookie);
                return newcookie;
        }
-       INIT_LIST_HEAD(&entry->domain_list);
-       INIT_LIST_HEAD(&entry->global_list);
-       INIT_WORK(&entry->work, async_run_entry_fn);
-       entry->func = func;
-       entry->data = data;
-       entry->domain = domain;
-
-       spin_lock_irqsave(&async_lock, flags);
-
-       /* allocate cookie and queue */
-       newcookie = entry->cookie = next_cookie++;
-
-       list_add_tail(&entry->domain_list, &domain->pending);
-       if (domain->registered)
-               list_add_tail(&entry->global_list, &async_global_pending);
-
-       atomic_inc(&entry_count);
-       spin_unlock_irqrestore(&async_lock, flags);
-
-       /* schedule for execution */
-       queue_work_node(node, system_unbound_wq, &entry->work);
 
-       return newcookie;
+       return __async_schedule_node_domain(func, data, node, domain, entry);
 }
 EXPORT_SYMBOL_GPL(async_schedule_node_domain);
 
@@ -232,6 +244,35 @@ async_cookie_t async_schedule_node(async_func_t func, void *data, int node)
 EXPORT_SYMBOL_GPL(async_schedule_node);
 
 /**
+ * async_schedule_dev_nocall - A simplified variant of async_schedule_dev()
+ * @func: function to execute asynchronously
+ * @dev: device argument to be passed to function
+ *
+ * @dev is used as both the argument for the function and to provide NUMA
+ * context for where to run the function.
+ *
+ * If the asynchronous execution of @func is scheduled successfully, return
+ * true. Otherwise, do nothing and return false, unlike async_schedule_dev()
+ * that will run the function synchronously then.
+ */
+bool async_schedule_dev_nocall(async_func_t func, struct device *dev)
+{
+       struct async_entry *entry;
+
+       entry = kzalloc(sizeof(struct async_entry), GFP_KERNEL);
+
+       /* Give up if there is no memory or too much work. */
+       if (!entry || atomic_read(&entry_count) > MAX_WORK) {
+               kfree(entry);
+               return false;
+       }
+
+       __async_schedule_node_domain(func, dev, dev_to_node(dev),
+                                    &async_dfl_domain, entry);
+       return true;
+}
+
+/**
  * async_synchronize_full - synchronize all asynchronous function calls
  *
  * This function waits until all asynchronous function calls have been done.