* hotter than drain_workqueue() and already looks at @wq->flags.
* Use WQ_DRAINING so that queue doesn't have to check nr_drainers.
*/
- spin_lock(&workqueue_lock);
+ spin_lock_irq(&workqueue_lock);
if (!wq->nr_drainers++)
wq->flags |= WQ_DRAINING;
- spin_unlock(&workqueue_lock);
+ spin_unlock_irq(&workqueue_lock);
reflush:
flush_workqueue(wq);
@@ ... @@ void drain_workqueue(struct workqueue_struct *wq)
goto reflush;
}
- spin_lock(&workqueue_lock);
+ spin_lock_irq(&workqueue_lock);
if (!--wq->nr_drainers)
wq->flags &= ~WQ_DRAINING;
- spin_unlock(&workqueue_lock);
+ spin_unlock_irq(&workqueue_lock);
}
EXPORT_SYMBOL_GPL(drain_workqueue);
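
The two hunks above make workqueue_lock IRQ-safe: every plain spin_lock()/spin_unlock() on it becomes spin_lock_irq()/spin_unlock_irq(). That is required once the lock can also be taken from IRQ context; otherwise an interrupt arriving while the lock is held on the same CPU could try to take it again and deadlock. A minimal sketch of that rule, assuming hypothetical names (my_lock, my_irq_handler) that are not part of this patch:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);

static void process_side(void)
{
	spin_lock_irq(&my_lock);	/* IRQs off while the lock is held */
	/* ... critical section ... */
	spin_unlock_irq(&my_lock);	/* IRQs back on */
}

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	/*
	 * Plain spin_lock() suffices here: hardirq context already runs
	 * with interrupts disabled.  Had process_side() used plain
	 * spin_lock(), this handler could interrupt it on the same CPU
	 * and spin on my_lock forever.
	 */
	spin_lock(&my_lock);
	spin_unlock(&my_lock);
	return IRQ_HANDLED;
}
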
@@ ... @@ struct workqueue_struct *__alloc_workqueue_key(...)
* list. Grab it, set max_active accordingly and add the new
* workqueue to workqueues list.
*/
- spin_lock(&workqueue_lock);
+ spin_lock_irq(&workqueue_lock);
if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
for_each_pwq_cpu(cpu, wq)
@@ ... @@ struct workqueue_struct *__alloc_workqueue_key(...)
list_add(&wq->list, &workqueues);
- spin_unlock(&workqueue_lock);
+ spin_unlock_irq(&workqueue_lock);
return wq;
err:
@@ ... @@ void destroy_workqueue(struct workqueue_struct *wq)
* wq list is used to freeze wq, remove from list after
* flushing is complete in case freeze races us.
*/
- spin_lock(&workqueue_lock);
+ spin_lock_irq(&workqueue_lock);
list_del(&wq->list);
- spin_unlock(&workqueue_lock);
+ spin_unlock_irq(&workqueue_lock);
if (wq->flags & WQ_RESCUER) {
kthread_stop(wq->rescuer->task);
@@ ... @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
- spin_lock(&workqueue_lock);
+ spin_lock_irq(&workqueue_lock);
wq->saved_max_active = max_active;
@@ ... @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
struct pool_workqueue *pwq = get_pwq(cpu, wq);
struct worker_pool *pool = pwq->pool;
- spin_lock_irq(&pool->lock);
+ spin_lock(&pool->lock);
if (!(wq->flags & WQ_FREEZABLE) ||
!(pool->flags & POOL_FREEZING))
pwq_set_max_active(pwq, max_active);
- spin_unlock_irq(&pool->lock);
+ spin_unlock(&pool->lock);
}
- spin_unlock(&workqueue_lock);
+ spin_unlock_irq(&workqueue_lock);
}
EXPORT_SYMBOL_GPL(workqueue_set_max_active);
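
Note the inversion in workqueue_set_max_active(): while workqueue_lock gains the _irq variants, the inner pool->lock drops them. pool->lock is now acquired with interrupts already disabled by the enclosing spin_lock_irq(&workqueue_lock), and using spin_unlock_irq() on the inner lock would re-enable interrupts while the outer lock is still held. A sketch of this nesting rule, with outer/inner as hypothetical stand-ins for the two locks:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(outer);
static DEFINE_SPINLOCK(inner);

static void nested(void)
{
	spin_lock_irq(&outer);		/* disables IRQs */
	spin_lock(&inner);		/* plain: IRQs are already off */

	/*
	 * spin_unlock_irq(&inner) here would be a bug: it would turn
	 * IRQs back on while outer is still held.
	 */
	spin_unlock(&inner);
	spin_unlock_irq(&outer);	/* IRQs on again */
}
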
@@ ... @@ void freeze_workqueues_begin(void)
{
unsigned int cpu;
- spin_lock(&workqueue_lock);
+ spin_lock_irq(&workqueue_lock);
WARN_ON_ONCE(workqueue_freezing);
workqueue_freezing = true;
@@ ... @@ void freeze_workqueues_begin(void)
struct workqueue_struct *wq;
for_each_std_worker_pool(pool, cpu) {
- spin_lock_irq(&pool->lock);
+ spin_lock(&pool->lock);
WARN_ON_ONCE(pool->flags & POOL_FREEZING);
pool->flags |= POOL_FREEZING;
@@ ... @@ void freeze_workqueues_begin(void)
pwq->max_active = 0;
}
- spin_unlock_irq(&pool->lock);
+ spin_unlock(&pool->lock);
}
}
- spin_unlock(&workqueue_lock);
+ spin_unlock_irq(&workqueue_lock);
}
/**
@@ ... @@ bool freeze_workqueues_busy(void)
unsigned int cpu;
bool busy = false;
- spin_lock(&workqueue_lock);
+ spin_lock_irq(&workqueue_lock);
WARN_ON_ONCE(!workqueue_freezing);
@@ ... @@ bool freeze_workqueues_busy(void)
}
}
out_unlock:
- spin_unlock(&workqueue_lock);
+ spin_unlock_irq(&workqueue_lock);
return busy;
}
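
freeze_workqueues_begin() zeroes max_active on every freezable pool_workqueue, and freeze_workqueues_busy() then reports whether any of them still has active work. The gating works because new work is only activated while nr_active is below max_active; with max_active at 0, nr_active can only fall. A simplified stand-in for that test, using a hypothetical two-field stub rather than the real struct pool_workqueue:

/* Hypothetical stub; the real pool_workqueue has many more fields. */
struct pwq_stub {
	int nr_active;		/* work items currently activated */
	int max_active;		/* 0 while frozen */
};

static int pwq_can_activate(const struct pwq_stub *pwq)
{
	/* Always false while frozen, so nr_active only decreases. */
	return pwq->nr_active < pwq->max_active;
}
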
@@ ... @@ void thaw_workqueues(void)
{
unsigned int cpu;
- spin_lock(&workqueue_lock);
+ spin_lock_irq(&workqueue_lock);
if (!workqueue_freezing)
goto out_unlock;
@@ ... @@ void thaw_workqueues(void)
struct workqueue_struct *wq;
for_each_std_worker_pool(pool, cpu) {
- spin_lock_irq(&pool->lock);
+ spin_lock(&pool->lock);
WARN_ON_ONCE(!(pool->flags & POOL_FREEZING));
pool->flags &= ~POOL_FREEZING;
@@ ... @@ void thaw_workqueues(void)
wake_up_worker(pool);
- spin_unlock_irq(&pool->lock);
+ spin_unlock(&pool->lock);
}
}
workqueue_freezing = false;
out_unlock:
- spin_unlock(&workqueue_lock);
+ spin_unlock_irq(&workqueue_lock);
}
#endif /* CONFIG_FREEZER */
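
For reference, the three CONFIG_FREEZER entry points patched above are driven in sequence: begin, poll until not busy, thaw. A hedged sketch of that flow; the retry loop and 10ms interval are illustrative, not the kernel's actual suspend code:

#include <linux/delay.h>
#include <linux/workqueue.h>

static void freezer_sequence(void)
{
	freeze_workqueues_begin();	/* stop activation: max_active = 0 */

	/* wait for already-active work items to finish */
	while (freeze_workqueues_busy())
		msleep(10);

	/* ... workqueues are quiescent here ... */

	thaw_workqueues();		/* restore max_active, wake workers */
}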