}
+/**
+ * create_and_start_worker - create and start a worker for a pool
+ * @pool: the target pool
+ *
+ * Create and start a new worker for @pool.
+ *
+ * CONTEXT:
+ * Might sleep.  Does GFP_KERNEL allocations.
+ *
+ * RETURNS:
+ * 0 on success, -ENOMEM on failure.
+ */
+static int create_and_start_worker(struct worker_pool *pool)
+{
+	struct worker *worker;
+
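+	/* create_worker() allocates and spawns a kthread, so this may sleep */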
+	worker = create_worker(pool);
+	if (worker) {
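+		/* start_worker() must be called with pool->lock held */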
+		spin_lock_irq(&pool->lock);
+		start_worker(worker);
+		spin_unlock_irq(&pool->lock);
+	}
+
+	return worker ? 0 : -ENOMEM;
+}
+
+/**
* destroy_worker - destroy a workqueue worker
* @worker: worker to be destroyed
*
	static DEFINE_MUTEX(create_mutex);
	u32 hash = wqattrs_hash(attrs);
	struct worker_pool *pool;
-	struct worker *worker;
	mutex_lock(&create_mutex);
		goto fail;
	/* create and start the initial worker */
-	worker = create_worker(pool);
-	if (!worker)
+	if (create_and_start_worker(pool) < 0)
		goto fail;
-	spin_lock_irq(&pool->lock);
-	start_worker(worker);
-	spin_unlock_irq(&pool->lock);
-
	/* install */
	spin_lock_irq(&workqueue_lock);
	hash_add(unbound_pool_hash, &pool->hash_node, hash);
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		for_each_cpu_worker_pool(pool, cpu) {
-			struct worker *worker;
-
			if (pool->nr_workers)
				continue;
-
-			worker = create_worker(pool);
-			if (!worker)
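+			/* returning NOTIFY_BAD aborts the CPU bring-up */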
+			if (create_and_start_worker(pool) < 0)
				return NOTIFY_BAD;
-
-			spin_lock_irq(&pool->lock);
-			start_worker(worker);
-			spin_unlock_irq(&pool->lock);
		}
		break;
		struct worker_pool *pool;
		for_each_cpu_worker_pool(pool, cpu) {
-			struct worker *worker;
-
			pool->flags &= ~POOL_DISASSOCIATED;
-
-			worker = create_worker(pool);
-			BUG_ON(!worker);
-			spin_lock_irq(&pool->lock);
-			start_worker(worker);
-			spin_unlock_irq(&pool->lock);
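+			/* the system cannot run without the initial per-CPU workers */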
+			BUG_ON(create_and_start_worker(pool) < 0);
		}
	}