{
struct io_cb_cancel_data *data = cancel_data;
struct io_wqe *wqe = data->wqe;
+ unsigned long flags;
bool ret = false;
/*
* Hold the lock to avoid ->cur_work going out of scope, caller
* may dereference the passed in work.
*/
- spin_lock_irq(&wqe->lock);
+ spin_lock_irqsave(&wqe->lock, flags);
if (worker->cur_work &&
data->cancel(worker->cur_work, data->caller_data)) {
send_sig(SIGINT, worker->task, 1);
ret = true;
}
- spin_unlock_irq(&wqe->lock);
+ spin_unlock_irqrestore(&wqe->lock, flags);
return ret;
}
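The change in each hunk is the same: spin_unlock_irq() unconditionally re-enables interrupts, which corrupts the IRQ state if the cancel path is entered with interrupts already disabled. The irqsave/irqrestore pair saves the caller's IRQ state in 'flags' and restores it exactly as it was. A minimal sketch of the difference between the two locking variants, using an illustrative lock rather than wqe->lock:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Only safe when the caller is known to have IRQs enabled:
 * spin_unlock_irq() turns interrupts back on unconditionally. */
static void demo_lock_irq(void)
{
	spin_lock_irq(&demo_lock);
	/* ... critical section ... */
	spin_unlock_irq(&demo_lock);
}

/* Safe from any context: the prior IRQ state is saved in 'flags'
 * and restored as-is, whether IRQs were on or off on entry. */
static void demo_lock_irqsave(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}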
.caller_data = cancel_data,
};
struct io_wq_work *work;
+ unsigned long flags;
bool found = false;
- spin_lock_irq(&wqe->lock);
+ spin_lock_irqsave(&wqe->lock, flags);
list_for_each_entry(work, &wqe->work_list, list) {
if (cancel(work, cancel_data)) {
list_del(&work->list);
found = true;
break;
}
}
- spin_unlock_irq(&wqe->lock);
+ spin_unlock_irqrestore(&wqe->lock, flags);
if (found) {
work->flags |= IO_WQ_WORK_CANCEL;
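Both pending-list hunks follow the same shape: walk wqe->work_list under the (now IRQ-saving) lock, unlink the matching entry, record that it was found, then drop the lock before touching the entry, so nothing beyond the list manipulation runs under the spinlock. A self-contained sketch of that pattern, with hypothetical demo_* names standing in for the io-wq structures:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define DEMO_WORK_CANCEL	(1 << 0)	/* stand-in for IO_WQ_WORK_CANCEL */

struct demo_work {
	struct list_head list;
	unsigned int flags;
};

struct demo_queue {
	spinlock_t lock;
	struct list_head work_list;
};

static bool demo_cancel_pending(struct demo_queue *q, struct demo_work *target)
{
	struct demo_work *work;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&q->lock, flags);
	list_for_each_entry(work, &q->work_list, list) {
		if (work == target) {
			/* unlink while the lock keeps the list stable */
			list_del(&work->list);
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&q->lock, flags);

	/* only touch the entry once the lock is dropped */
	if (found)
		work->flags |= DEMO_WORK_CANCEL;
	return found;
}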
struct io_wq_work *cwork)
{
struct io_wq_work *work;
+ unsigned long flags;
bool found = false;
cwork->flags |= IO_WQ_WORK_CANCEL;
/*
 * First check pending list, if we're lucky we can just remove it
 * from there. CANCEL_OK means that the work is returned as-new,
* no completion will be posted for it.
*/
- spin_lock_irq(&wqe->lock);
+ spin_lock_irqsave(&wqe->lock, flags);
list_for_each_entry(work, &wqe->work_list, list) {
if (work == cwork) {
list_del(&work->list);
found = true;
break;
}
}
- spin_unlock_irq(&wqe->lock);
+ spin_unlock_irqrestore(&wqe->lock, flags);
if (found) {
work->flags |= IO_WQ_WORK_CANCEL;
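For callers, the distinction the comment above draws matters: IO_WQ_CANCEL_OK means the work was unlinked before it ever ran, so it is handed back as-new and no completion will be posted, while a running work only gets a SIGINT nudge and still completes normally. A hedged sketch of consuming the result, assuming io_wq_cancel_work() and the IO_WQ_CANCEL_* values from io-wq's header; the wrapper function itself is hypothetical:

#include "io-wq.h"

static void demo_try_cancel(struct io_wq *wq, struct io_wq_work *work)
{
	switch (io_wq_cancel_work(wq, work)) {
	case IO_WQ_CANCEL_OK:
		/* never ran; returned as-new, no completion posted,
		 * so the caller still owns (and must dispose of) it */
		break;
	case IO_WQ_CANCEL_RUNNING:
		/* SIGINT sent to the worker; a completion will still
		 * be posted when the work finishes or aborts */
		break;
	case IO_WQ_CANCEL_NOTFOUND:
		/* already ran to completion before we got here */
		break;
	}
}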